From 1a9c8535a44c99c4e132441ed8654e77e5a5aa45 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 18 Oct 2022 12:00:22 -0500 Subject: [PATCH 001/362] Update comment in Arborist nginx conf (#2057) --- kube/services/revproxy/gen3.nginx.conf/arborist-service.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf b/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf index 942307017..0e492eb71 100644 --- a/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/arborist-service.conf @@ -87,7 +87,7 @@ location = /authz/mapping { set $proxy_service "arborist"; set $upstream http://${arborist_release_name}-service.$namespace.svc.cluster.local; - # Do not pass the username arg here! Otherwise anyone can see anyone's access. + # Do not pass the username arg here! Otherwise anyone can see anyone's access for Arborist<4.0.0. # Arborist will fall back to parsing the jwt for username. 
proxy_pass $upstream/auth/mapping; } From 08960990f3a80e39f5ce50db3b72bf568324b162 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 21 Oct 2022 15:00:17 -0500 Subject: [PATCH 002/362] update squid webwhitelist (#2060) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 1bf67da16..9955eff9c 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -143,6 +143,7 @@ sa-update.space-pro.be security.debian.org services.mathworks.com streaming.stat.iastate.edu +us-east4-docker.pkg.dev us-central1-docker.pkg.dev www.google.com www.icpsr.umich.edu From 22a738fc848bcb73754307ca351dd6c513ddb3d1 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Tue, 25 Oct 2022 13:03:53 -0500 Subject: [PATCH 003/362] adding .pedscommons.org to whitelist (#2062) --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 1b87923e4..1421f6d5d 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -75,6 +75,7 @@ .paloaltonetworks.com .pandemicresponsecommons.org .perl.org +.pedscommons.org .planx-ci.io .planx-pla.net .postgresql.org From 90f30caa534eb50b774c3204ae1934cb1955345a Mon Sep 17 00:00:00 2001 From: cmlsn <100160785+cmlsn@users.noreply.github.com> Date: Wed, 26 Oct 2022 14:50:44 -0700 Subject: [PATCH 004/362] chore - modifying nginx settings to allow larger uploads that are causing errors on manifest upload. 
(#2063) --- gen3/lib/manifestDefaults/modsec/modsecurity.conf | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/gen3/lib/manifestDefaults/modsec/modsecurity.conf b/gen3/lib/manifestDefaults/modsec/modsecurity.conf index 117d92e00..508834620 100644 --- a/gen3/lib/manifestDefaults/modsec/modsecurity.conf +++ b/gen3/lib/manifestDefaults/modsec/modsecurity.conf @@ -39,15 +39,15 @@ SecRule REQUEST_HEADERS:Content-Type "application/json" \ # to the size of data, with files excluded. You want to keep that value as # low as practical. # -SecRequestBodyLimit 13107200 -SecRequestBodyNoFilesLimit 131072 +SecRequestBodyLimit 524288000 +SecRequestBodyNoFilesLimit 1048576 # What do do if the request body size is above our configured limit. # Keep in mind that this setting will automatically be set to ProcessPartial # when SecRuleEngine is set to DetectionOnly mode in order to minimize # disruptions when initially deploying ModSecurity. # -SecRequestBodyLimitAction Reject +SecRequestBodyLimitAction ProcessPartial # Verify that we've correctly processed the request body. 
# As a rule of thumb, when failing to process a request body From 902f442b77a721977f88db2836a3c617ca00264b Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 27 Oct 2022 12:50:39 -0700 Subject: [PATCH 005/362] feat(datadog): enable for DICOM Viewer and DICOM Server (Orthanc) (#2065) --- .../dicom-server/dicom-server-deploy.yaml | 25 +++++++++++++++++++ .../dicom-viewer/dicom-viewer-deploy.yaml | 25 +++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index a2c1a2c03..b2ef0834e 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -24,6 +24,31 @@ spec: containers: - name: dicom-server GEN3_DICOM-SERVER_IMAGE + env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP imagePullPolicy: Always readinessProbe: httpGet: diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml index e7d05903b..d1fb8ce55 100644 --- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml @@ -20,6 +20,31 @@ spec: containers: - name: dicom-viewer GEN3_DICOM-VIEWER_IMAGE + env: + - name: DD_ENABLED + valueFrom: + configMapKeyRef: + name: manifest-global + key: dd_enabled + optional: true + - name: DD_ENV + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/env'] + - name: 
DD_SERVICE + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/service'] + - name: DD_VERSION + valueFrom: + fieldRef: + fieldPath: metadata.labels['tags.datadoghq.com/version'] + - name: DD_LOGS_INJECTION + value: "true" + - name: DD_AGENT_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP imagePullPolicy: Always readinessProbe: httpGet: From 2bb923a07ce3aa392154cf2940e21984f4679e3d Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Mon, 31 Oct 2022 13:11:36 -0700 Subject: [PATCH 006/362] update jenkins versions (#2066) --- Docker/Jenkins-CI-Worker/Dockerfile | 2 +- Docker/Jenkins-Worker/Dockerfile | 2 +- Docker/Jenkins/Dockerfile | 2 +- Docker/Jenkins2/Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/Jenkins-CI-Worker/Dockerfile index 3ed282c80..6da26cb87 100644 --- a/Docker/Jenkins-CI-Worker/Dockerfile +++ b/Docker/Jenkins-CI-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.9-1 +FROM jenkins/jnlp-slave:4.13.3-1 USER root diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/Jenkins-Worker/Dockerfile index 5fd7db839..2136d76e3 100644 --- a/Docker/Jenkins-Worker/Dockerfile +++ b/Docker/Jenkins-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.3-1 +FROM jenkins/jnlp-slave:4.13.3-1 USER root diff --git a/Docker/Jenkins/Dockerfile b/Docker/Jenkins/Dockerfile index e06eb7b71..3db580b91 100644 --- a/Docker/Jenkins/Dockerfile +++ b/Docker/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.298 +FROM jenkins/jenkins:2.375 USER root diff --git a/Docker/Jenkins2/Dockerfile b/Docker/Jenkins2/Dockerfile index 26f81c143..9d07df981 100644 --- a/Docker/Jenkins2/Dockerfile +++ b/Docker/Jenkins2/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.298 +FROM jenkins/jenkins:2.375 USER root From da2478d9364ec9dc2f107eccc8920e491db163c8 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Tue, 1 Nov 2022 08:36:52 -0700 Subject: [PATCH 007/362] GH Actions 
updated fror jenkins images (#2067) * GH Actions updated fror jenkins images * fix names * full path * try relative path * revert * add buildcontext * remove python-pip * remove python-virutalenv * update docker file * update docker files --- .github/workflows/image_build_push.yaml | 36 +++++++++++++++++++++++++ .secrets.baseline | 8 +++--- Docker/Jenkins-CI-Worker/Dockerfile | 8 +++--- Docker/Jenkins-Worker/Dockerfile | 8 +++--- Docker/Jenkins/Dockerfile | 9 +++---- 5 files changed, 49 insertions(+), 20 deletions(-) diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml index 51543f0fe..898a65670 100644 --- a/.github/workflows/image_build_push.yaml +++ b/.github/workflows/image_build_push.yaml @@ -40,3 +40,39 @@ jobs: ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins: + name: Jenkins Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/Jenkins/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins" + OVERRIDE_REPO_NAME: "jenkins" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-ci-worker: + name: Jenkins-CI-Worker Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/Jenkins-CI-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-CI-Worker" + OVERRIDE_REPO_NAME: "jenkins-ci-worker" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-qa-worker: + name: 
Jenkins-QA-Worker Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/Jenkins-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-Worker" + OVERRIDE_REPO_NAME: "jenkins-qa-worker" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/.secrets.baseline b/.secrets.baseline index 7a459b129..ddc2050f3 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-07-29T15:31:31Z", + "generated_at": "2022-10-31T23:55:07Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -79,7 +79,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_secret": false, "is_verified": false, - "line_number": 124, + "line_number": 122, "type": "Secret Keyword" } ], @@ -88,7 +88,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_secret": false, "is_verified": false, - "line_number": 138, + "line_number": 136, "type": "Secret Keyword" } ], @@ -97,7 +97,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_secret": false, "is_verified": false, - "line_number": 113, + "line_number": 110, "type": "Secret Keyword" } ], diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/Jenkins-CI-Worker/Dockerfile index 6da26cb87..a22d81248 100644 --- a/Docker/Jenkins-CI-Worker/Dockerfile +++ b/Docker/Jenkins-CI-Worker/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python 
python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -72,7 +72,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: https://www.postgresql.org/download/linux/debian/ @@ -98,9 +98,7 @@ RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. -RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3.8 - +RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade # install terraform RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/Jenkins-Worker/Dockerfile index 2136d76e3..58d098d85 100644 --- a/Docker/Jenkins-Worker/Dockerfile +++ b/Docker/Jenkins-Worker/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install python and pip and aws cli -RUN set -xe && apt-get update && 
apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base wget RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade && python -m pip install lxml --upgrade RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade @@ -84,7 +84,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # install chrome (supports headless mode) @@ -129,9 +129,7 @@ RUN sed -i 's/python3/python3.7/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.7/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3.8 - +RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade # update /etc/sudoers RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ diff --git a/Docker/Jenkins/Dockerfile b/Docker/Jenkins/Dockerfile index 3db580b91..b0d579ec3 100644 --- a/Docker/Jenkins/Dockerfile +++ b/Docker/Jenkins/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -25,7 +25,6 @@ RUN set -xe && apt-get update \ libbz2-dev \ libexpat1-dev \ liblzma-dev \ - python-virtualenv \ lua5.3 \ r-base \ software-properties-common \ @@ -60,7 +59,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: 
https://www.postgresql.org/download/linux/debian/ @@ -86,9 +85,7 @@ RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. -RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 - +RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade # install chrome (supports headless mode) RUN set -xe \ From 5c3ba19151404cc6995f012dfd347f9fca69ca5e Mon Sep 17 00:00:00 2001 From: jawadqur <55899496+jawadqur@users.noreply.github.com> Date: Tue, 1 Nov 2022 15:40:23 -0500 Subject: [PATCH 008/362] BRH-301: Extend hatchery to launch prismacloud containers (#2061) --- gen3/bin/kube-setup-hatchery.sh | 9 +++++++++ kube/services/hatchery/hatchery-deploy.yaml | 12 ++++++++++++ 2 files changed, 21 insertions(+) diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index b3eb659b8..1192c293e 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -82,6 +82,15 @@ if ! 
g3kubectl get sa "$saName" -o json | jq -e '.metadata.annotations | ."eks.a gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 fi +if [[ -f "$(gen3_secrets_folder)/prisma/apikey.json" ]]; then + ACCESSKEYID=$(jq -r .AccessKeyID "$(gen3_secrets_folder)/prisma/apikey.json") + SECRETKEY=$(jq -r .SecretKey "$(gen3_secrets_folder)/prisma/apikey.json") + if [[ ! -z "$ACCESSKEYID" && ! -z "$SECRETKEY" ]]; then + gen3_log_info "Found prisma apikey, creating kubernetes secret so hatchery can do prismacloud stuff.." + g3kubectl delete secret prisma-secret --ignore-not-found + g3kubectl create secret generic prisma-secret --from-literal=AccessKeyId=$ACCESSKEYID --from-literal=SecretKey=$SECRETKEY + fi +fi g3kubectl apply -f "${GEN3_HOME}/kube/services/hatchery/hatchery-service.yaml" gen3 roll hatchery diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml index 5ac1bb805..f67100098 100644 --- a/kube/services/hatchery/hatchery-deploy.yaml +++ b/kube/services/hatchery/hatchery-deploy.yaml @@ -104,6 +104,18 @@ spec: valueFrom: fieldRef: fieldPath: status.hostIP + - name: PRISMA_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: prisma-secret + key: AccessKeyId + optional: true + - name: PRISMA_SECRET_KEY + valueFrom: + secretKeyRef: + name: prisma-secret + key: SecretKey + optional: true volumeMounts: - name: hatchery-config readOnly: true From 38476a3f879a4c24453b3937bfb6cd90268825ef Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 2 Nov 2022 07:43:32 -0700 Subject: [PATCH 009/362] use jdk11 images for jenkins workers (#2068) * use jdk11 images for jenkins workers * update jenkins main deployment --- Docker/Jenkins-CI-Worker/Dockerfile | 2 +- Docker/Jenkins-Worker/Dockerfile | 2 +- Docker/Jenkins/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Docker/Jenkins-CI-Worker/Dockerfile 
b/Docker/Jenkins-CI-Worker/Dockerfile index a22d81248..08d047e52 100644 --- a/Docker/Jenkins-CI-Worker/Dockerfile +++ b/Docker/Jenkins-CI-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.13.3-1 +FROM jenkins/jnlp-slave:4.13.3-1-jdk11 USER root diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/Jenkins-Worker/Dockerfile index 58d098d85..0ad941def 100644 --- a/Docker/Jenkins-Worker/Dockerfile +++ b/Docker/Jenkins-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jnlp-slave:4.13.3-1 +FROM jenkins/jnlp-slave:4.13.3-1-jdk11 USER root diff --git a/Docker/Jenkins/Dockerfile b/Docker/Jenkins/Dockerfile index b0d579ec3..a872ee1dd 100644 --- a/Docker/Jenkins/Dockerfile +++ b/Docker/Jenkins/Dockerfile @@ -114,7 +114,7 @@ RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ # add our custom start script COPY jenkins.sh /opt/cdis/bin/jenkins.sh RUN chmod -R a+rx /opt/cdis -ENTRYPOINT ["/sbin/tini", "--", "/opt/cdis/bin/jenkins.sh"] +ENTRYPOINT ["/usr/bin/tini", "--", "/opt/cdis/bin/jenkins.sh"] USER jenkins From a395b08cc8e2eaa2c05b96aa736b302f932a06a6 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Mon, 7 Nov 2022 12:23:48 -0600 Subject: [PATCH 010/362] fix: dbohdsi is the new label to have Atlas DB access (#2071) * fix: dbohdsi is the new label to have Atlas DB access * fix: change label for OHDSI WebAPI --- kube/services/cohort-middleware/cohort-middleware-deploy.yaml | 2 +- kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index a21d97900..e301856e5 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -19,7 +19,7 @@ spec: metadata: labels: app: cohort-middleware - dbatlas: "yes" + dbohdsi: "yes" dbomop-data: "yes" public: "yes" tags.datadoghq.com/service: 
"cohort-middleware" diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index f720ec530..2f4e57d47 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -17,7 +17,7 @@ spec: metadata: labels: app: ohdsi-webapi - dbohdsi-webapi: "yes" + dbohdsi: "yes" dbomop-data: "yes" internet: "yes" public: "yes" From 015dcb1356f75ab25a3b20500fd1b82920f66632 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 8 Nov 2022 14:26:03 -0600 Subject: [PATCH 011/362] feat(ohdsi): updates for 2.12 (#2072) --- gen3/bin/kube-setup-ohdsi.sh | 4 ++-- .../ohdsi-atlas-config-local.yaml} | 0 .../ohdsi-webapi-config.yaml} | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) rename kube/services/{ohdsi/ohdsi-configmap.yaml => ohdsi-atlas/ohdsi-atlas-config-local.yaml} (100%) rename kube/services/{ohdsi/ohdsi-secrets.yaml => ohdsi-webapi/ohdsi-webapi-config.yaml} (97%) diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index 0a4d3b7a8..52ae20b33 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -90,7 +90,7 @@ setup_secrets() { export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds") - envsubst <"${GEN3_HOME}/kube/services/ohdsi/ohdsi-secrets.yaml" | g3kubectl apply -f - + envsubst <"${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml" | g3kubectl apply -f - envsubst '$hostname' <"${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-reverse-proxy-config.yaml" | g3kubectl apply -f - ) @@ -123,7 +123,7 @@ setup_creds setup_secrets setup_ingress -envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-configmap.yaml | g3kubectl apply -f - +envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-atlas-config-local.yaml | g3kubectl apply -f 
- gen3 roll ohdsi-webapi g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml" diff --git a/kube/services/ohdsi/ohdsi-configmap.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml similarity index 100% rename from kube/services/ohdsi/ohdsi-configmap.yaml rename to kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml diff --git a/kube/services/ohdsi/ohdsi-secrets.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml similarity index 97% rename from kube/services/ohdsi/ohdsi-secrets.yaml rename to kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml index 7b84c7964..5cd46edd9 100644 --- a/kube/services/ohdsi/ohdsi-secrets.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml @@ -19,13 +19,12 @@ stringData: flyway_datasource_username: $DB_USER flyway_datasource_password: $DB_PASS flyway_locations: classpath:db/migration/postgresql - # Zoe testing Atlas-Fence + security_cors_enabled: "true" security_origin: "*" security_token_expiration: "43200" security_ssl_enabled: "false" -# security_provider: DisabledSecurity security_provider: AtlasRegularSecurity security_auth_windows_enabled: "false" @@ -50,6 +49,7 @@ stringData: security_oid_url: https://$hostname/.well-known/openid-configuration security_oid_redirectUrl: https://atlas.$hostname/atlas/#/welcome security_oid_logoutUrl: https://atlas.$hostname/atlas/#/home + security_oid_extraScopes: user security_oauth_callback_ui: https://atlas.$hostname/atlas/#/welcome security_oauth_callback_api: https://atlas.$hostname/WebAPI/user/oauth/callback From 4b944f4f69acb98737b0463cb0b64639f335154e Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 8 Nov 2022 16:22:02 -0600 Subject: [PATCH 012/362] fix(ohdsi): fix path for Atlas config (#2073) --- gen3/bin/kube-setup-ohdsi.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index 52ae20b33..d586570db 100644 --- 
a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -123,7 +123,7 @@ setup_creds setup_secrets setup_ingress -envsubst <${GEN3_HOME}/kube/services/ohdsi/ohdsi-atlas-config-local.yaml | g3kubectl apply -f - +envsubst <${GEN3_HOME}/kube/services/ohdsi-atlas/ohdsi-atlas-config-local.yaml | g3kubectl apply -f - gen3 roll ohdsi-webapi g3kubectl apply -f "${GEN3_HOME}/kube/services/ohdsi-webapi/ohdsi-webapi-service.yaml" From 27d182bd6507689e725314825d63c9544d533f77 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 11 Nov 2022 13:21:52 -0800 Subject: [PATCH 013/362] Chore/GitHub actions for jenkins images (#2074) * build jenkins images when Dockerfile changes * add path filter * remove jenkins action from the generic workflow --- .github/workflows/image_build_push.yaml | 36 -------------- .../workflows/image_build_push_jenkins.yaml | 47 +++++++++++++++++++ .../Jenkins-CI-Worker/Dockerfile | 0 .../{ => jenkins}/Jenkins-CI-Worker/README.md | 0 .../Jenkins-CI-Worker/certfix.sh | 0 .../Jenkins-CI-Worker/install-python3.8.sh | 0 .../{ => jenkins}/Jenkins-Worker/Dockerfile | 0 Docker/{ => jenkins}/Jenkins-Worker/README.md | 0 .../Jenkins-Worker/install-python3.8.sh | 0 Docker/{ => jenkins}/Jenkins/Dockerfile | 0 Docker/{ => jenkins}/Jenkins/README.md | 0 .../Jenkins/install-python3.8.sh | 0 Docker/{ => jenkins}/Jenkins/jenkins.sh | 0 Docker/{ => jenkins}/Jenkins2/Dockerfile | 0 Docker/{ => jenkins}/Jenkins2/README.md | 0 .../Jenkins2/install-python3.8.sh | 0 .../Jenkins2/jenkins-master-deployment.yaml | 0 Docker/{ => jenkins}/Jenkins2/jenkins.values | 0 Docker/{ => jenkins}/Jenkins2/jenkins2.sh | 0 .../k8sjenkins-agent-master-policy.yaml | 0 .../Jenkins2/k8sjenkins-agent-policy.yaml | 0 21 files changed, 47 insertions(+), 36 deletions(-) create mode 100644 .github/workflows/image_build_push_jenkins.yaml rename Docker/{ => jenkins}/Jenkins-CI-Worker/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins-CI-Worker/README.md (100%) rename Docker/{ => 
jenkins}/Jenkins-CI-Worker/certfix.sh (100%) rename Docker/{ => jenkins}/Jenkins-CI-Worker/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins-Worker/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins-Worker/README.md (100%) rename Docker/{ => jenkins}/Jenkins-Worker/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins/README.md (100%) rename Docker/{ => jenkins}/Jenkins/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins/jenkins.sh (100%) rename Docker/{ => jenkins}/Jenkins2/Dockerfile (100%) rename Docker/{ => jenkins}/Jenkins2/README.md (100%) rename Docker/{ => jenkins}/Jenkins2/install-python3.8.sh (100%) rename Docker/{ => jenkins}/Jenkins2/jenkins-master-deployment.yaml (100%) rename Docker/{ => jenkins}/Jenkins2/jenkins.values (100%) rename Docker/{ => jenkins}/Jenkins2/jenkins2.sh (100%) rename Docker/{ => jenkins}/Jenkins2/k8sjenkins-agent-master-policy.yaml (100%) rename Docker/{ => jenkins}/Jenkins2/k8sjenkins-agent-policy.yaml (100%) diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml index 898a65670..51543f0fe 100644 --- a/.github/workflows/image_build_push.yaml +++ b/.github/workflows/image_build_push.yaml @@ -40,39 +40,3 @@ jobs: ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - jenkins: - name: Jenkins Build and Push - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/Jenkins/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins" - OVERRIDE_REPO_NAME: "jenkins" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - jenkins-ci-worker: - name: 
Jenkins-CI-Worker Build and Push - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/Jenkins-CI-Worker/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-CI-Worker" - OVERRIDE_REPO_NAME: "jenkins-ci-worker" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - jenkins-qa-worker: - name: Jenkins-QA-Worker Build and Push - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/Jenkins-Worker/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/Jenkins-Worker" - OVERRIDE_REPO_NAME: "jenkins-qa-worker" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml new file mode 100644 index 000000000..466fc1f68 --- /dev/null +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -0,0 +1,47 @@ +name: Build Python Base Images and Push to Quay and ECR + +on: + push: + paths: + - Docker/jenkins/** + +jobs: + jenkins: + name: Jenkins Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins" + OVERRIDE_REPO_NAME: "jenkins" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-ci-worker: + name: Jenkins-CI-Worker Build and 
Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker" + OVERRIDE_REPO_NAME: "jenkins-ci-worker" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins-qa-worker: + name: Jenkins-QA-Worker Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker" + OVERRIDE_REPO_NAME: "jenkins-qa-worker" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} diff --git a/Docker/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile similarity index 100% rename from Docker/Jenkins-CI-Worker/Dockerfile rename to Docker/jenkins/Jenkins-CI-Worker/Dockerfile diff --git a/Docker/Jenkins-CI-Worker/README.md b/Docker/jenkins/Jenkins-CI-Worker/README.md similarity index 100% rename from Docker/Jenkins-CI-Worker/README.md rename to Docker/jenkins/Jenkins-CI-Worker/README.md diff --git a/Docker/Jenkins-CI-Worker/certfix.sh b/Docker/jenkins/Jenkins-CI-Worker/certfix.sh similarity index 100% rename from Docker/Jenkins-CI-Worker/certfix.sh rename to Docker/jenkins/Jenkins-CI-Worker/certfix.sh diff --git a/Docker/Jenkins-CI-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh similarity index 100% rename from Docker/Jenkins-CI-Worker/install-python3.8.sh rename to 
Docker/jenkins/Jenkins-CI-Worker/install-python3.8.sh diff --git a/Docker/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile similarity index 100% rename from Docker/Jenkins-Worker/Dockerfile rename to Docker/jenkins/Jenkins-Worker/Dockerfile diff --git a/Docker/Jenkins-Worker/README.md b/Docker/jenkins/Jenkins-Worker/README.md similarity index 100% rename from Docker/Jenkins-Worker/README.md rename to Docker/jenkins/Jenkins-Worker/README.md diff --git a/Docker/Jenkins-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-Worker/install-python3.8.sh similarity index 100% rename from Docker/Jenkins-Worker/install-python3.8.sh rename to Docker/jenkins/Jenkins-Worker/install-python3.8.sh diff --git a/Docker/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile similarity index 100% rename from Docker/Jenkins/Dockerfile rename to Docker/jenkins/Jenkins/Dockerfile diff --git a/Docker/Jenkins/README.md b/Docker/jenkins/Jenkins/README.md similarity index 100% rename from Docker/Jenkins/README.md rename to Docker/jenkins/Jenkins/README.md diff --git a/Docker/Jenkins/install-python3.8.sh b/Docker/jenkins/Jenkins/install-python3.8.sh similarity index 100% rename from Docker/Jenkins/install-python3.8.sh rename to Docker/jenkins/Jenkins/install-python3.8.sh diff --git a/Docker/Jenkins/jenkins.sh b/Docker/jenkins/Jenkins/jenkins.sh similarity index 100% rename from Docker/Jenkins/jenkins.sh rename to Docker/jenkins/Jenkins/jenkins.sh diff --git a/Docker/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile similarity index 100% rename from Docker/Jenkins2/Dockerfile rename to Docker/jenkins/Jenkins2/Dockerfile diff --git a/Docker/Jenkins2/README.md b/Docker/jenkins/Jenkins2/README.md similarity index 100% rename from Docker/Jenkins2/README.md rename to Docker/jenkins/Jenkins2/README.md diff --git a/Docker/Jenkins2/install-python3.8.sh b/Docker/jenkins/Jenkins2/install-python3.8.sh similarity index 100% rename from Docker/Jenkins2/install-python3.8.sh rename 
to Docker/jenkins/Jenkins2/install-python3.8.sh diff --git a/Docker/Jenkins2/jenkins-master-deployment.yaml b/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml similarity index 100% rename from Docker/Jenkins2/jenkins-master-deployment.yaml rename to Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml diff --git a/Docker/Jenkins2/jenkins.values b/Docker/jenkins/Jenkins2/jenkins.values similarity index 100% rename from Docker/Jenkins2/jenkins.values rename to Docker/jenkins/Jenkins2/jenkins.values diff --git a/Docker/Jenkins2/jenkins2.sh b/Docker/jenkins/Jenkins2/jenkins2.sh similarity index 100% rename from Docker/Jenkins2/jenkins2.sh rename to Docker/jenkins/Jenkins2/jenkins2.sh diff --git a/Docker/Jenkins2/k8sjenkins-agent-master-policy.yaml b/Docker/jenkins/Jenkins2/k8sjenkins-agent-master-policy.yaml similarity index 100% rename from Docker/Jenkins2/k8sjenkins-agent-master-policy.yaml rename to Docker/jenkins/Jenkins2/k8sjenkins-agent-master-policy.yaml diff --git a/Docker/Jenkins2/k8sjenkins-agent-policy.yaml b/Docker/jenkins/Jenkins2/k8sjenkins-agent-policy.yaml similarity index 100% rename from Docker/Jenkins2/k8sjenkins-agent-policy.yaml rename to Docker/jenkins/Jenkins2/k8sjenkins-agent-policy.yaml From 644044fc7847334797ce4f8a081377e626d6a02f Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 18 Nov 2022 11:02:45 -0700 Subject: [PATCH 014/362] enabling USM for datadog (#2080) --- .secrets.baseline | 4 ++-- kube/services/datadog/values.yaml | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index ddc2050f3..e087e9243 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-10-31T23:55:07Z", + "generated_at": "2022-11-17T21:04:51Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -740,7 +740,7 @@ "hashed_secret": 
"52330dffa4d0795b4199a66428e54eca228e1661", "is_secret": false, "is_verified": false, - "line_number": 15, + "line_number": 20, "type": "Secret Keyword" } ], diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 47896e4f0..95ec57239 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -10,6 +10,11 @@ datadog: useHostPort: true nonLocalTraffic: true + #Enables Optional Universal Service Monitoring + ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm + serviceMonitoring: + enabled: true + # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. ## If set, this parameter takes precedence over "apiKey". apiKeyExistingSecret: "datadog-agent" From 6e4fe582a5c6c53a63bf6d699c26294a76ff12ad Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Fri, 18 Nov 2022 12:03:34 -0600 Subject: [PATCH 015/362] (feat): add opencost report job for argo -> s3 (#2077) * (feat): add opencost report job for argo -> s3 --- .../jobs/opencost-report-argo-job.yaml | 55 +++++++++++++++++++ kube/services/jobs/opencost-report-job.yaml | 36 ------------ 2 files changed, 55 insertions(+), 36 deletions(-) create mode 100644 kube/services/jobs/opencost-report-argo-job.yaml delete mode 100644 kube/services/jobs/opencost-report-job.yaml diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml new file mode 100644 index 000000000..9b8809cd7 --- /dev/null +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -0,0 +1,55 @@ +# +# run with: +# gen3 job run opencost-report-argo \ +# BUCKET_NAME $GEN3_BUCKET_NAME \ +# OPENCOST_URL $OPENCOST_URL \ +# +# BUCKET_NAME(required) +# Name of the bucket to upload the generated reports to. 
+# Make sure that there is a service account called "reports-service-account" with access to this bucket. +# +# OPENCOST_URL(optional) +# URL to query OpenCost API's. Default is https://kubecost-cost-analyzer.kubecost +# +# +# Example +# gen3 job run opencost-report-argo BUCKET_NAME opencost-report-bucket +# +# Cronjob Example +# gen3 job cron opencost-report-argo @daily BUCKET_NAME opencost-report-bucket +apiVersion: batch/v1 +kind: Job +metadata: + name: opencost-report-argo +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: reports-service-account + containers: + - name: send-report + GEN3_OPENCOST-REPORTER_IMAGE|-image: quay.io/cdis/proto-opencost-reporter:master-| + imagePullPolicy: Always + env: + - name: OPENCOST_URL + GEN3_OPENCOST_URL|-value: https://kubecost-cost-analyzer.kubecost-| + - name: ENV + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: BUCKET_NAME + GEN3_BUCKET_NAME|-value: ""-| + command: [ "/bin/bash" ] + args: + - "-c" + - | + proto-opencost-reporter GetAllocationReport \ + --from_days_before 9 \ + --to_days_before 1 \ + --aggregate_by label:gen3username \ + --filter_namespaces argo \ + --share_idle_by_node + restartPolicy: Never \ No newline at end of file diff --git a/kube/services/jobs/opencost-report-job.yaml b/kube/services/jobs/opencost-report-job.yaml deleted file mode 100644 index e74aa1084..000000000 --- a/kube/services/jobs/opencost-report-job.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: opencost-report - namespace: kubecost -spec: - template: - metadata: - labels: - app: gen3job - spec: - serviceAccountName: report-service-account - containers: - - name: send-report - image: quay.io/cdis/awshelper:master - imagePullPolicy: Always - env: - - name: gen3Env - valueFrom: - configMapKeyRef: - name: global - key: environment - - name: JENKINS_HOME - value: "devterm" - - name: GEN3_HOME - value: /home/ubuntu/cloud-automation - - 
name: bucketName - GEN3_BUCKET_NAME|-value: ""-| - command: [ "/bin/bash" ] - args: - - "-c" - - | - curl -k "https://kubecost-cost-analyzer.kubecost/model/allocation/summary?aggregate=label%3Agen3username&window=7d&accumulate=true&shareIdle=false&idleByNode=false&shareTenancyCosts=true&shareNamespaces=&shareLabels=&shareCost=NaN&shareSplit=weighted" | jq -r . > "report-$(date +"%m-%d-%y").json" - aws s3 cp ./report*.json s3://$bucketName - restartPolicy: Never From 2974cecd8b63e3398b6b32beb23290b061425ddd Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 18 Nov 2022 12:04:20 -0600 Subject: [PATCH 016/362] updated aws account list for ecr access (#2059) --- gen3/bin/ecr.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 5b41f8d2c..23254c5de 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -31,6 +31,7 @@ accountList=( 980870151884 205252583234 885078588865 +922467707295 ) principalStr="" From 07989eff6fcd3e25895cc79fe5f4121789d2760f Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Mon, 21 Nov 2022 07:42:15 -0800 Subject: [PATCH 017/362] clean up jenkins2 config and upgrade version (#2076) * clean up jenkins2 config and upgrade version * Update jenkins2.sh update jenkins url * fix quay repo names Co-authored-by: Ajo Augustine --- .../workflows/image_build_push_jenkins.yaml | 17 +- Docker/jenkins/Jenkins2/Dockerfile | 11 +- .../Jenkins2/jenkins-master-deployment.yaml | 355 ------------------ Docker/jenkins/Jenkins2/jenkins.values | 39 -- Docker/jenkins/Jenkins2/jenkins2.sh | 14 +- .../k8sjenkins-agent-master-policy.yaml | 18 - .../Jenkins2/k8sjenkins-agent-policy.yaml | 19 - 7 files changed, 32 insertions(+), 441 deletions(-) delete mode 100755 Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml delete mode 100644 Docker/jenkins/Jenkins2/jenkins.values delete mode 100644 Docker/jenkins/Jenkins2/k8sjenkins-agent-master-policy.yaml delete mode 100644 Docker/jenkins/Jenkins2/k8sjenkins-agent-policy.yaml diff --git 
a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml index 466fc1f68..d08ac737d 100644 --- a/.github/workflows/image_build_push_jenkins.yaml +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -19,13 +19,26 @@ jobs: ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + jenkins2: + name: Jenkins2 Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins2/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins2" + OVERRIDE_REPO_NAME: "jenkins2" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} jenkins-ci-worker: name: Jenkins-CI-Worker Build and Push uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile" DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker" - OVERRIDE_REPO_NAME: "jenkins-ci-worker" + OVERRIDE_REPO_NAME: "gen3-ci-worker" USE_QUAY_ONLY: true secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} @@ -38,7 +51,7 @@ jobs: with: DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile" DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker" - OVERRIDE_REPO_NAME: "jenkins-qa-worker" + OVERRIDE_REPO_NAME: "gen3-qa-worker" USE_QUAY_ONLY: true secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index 9d07df981..59cb5672e 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ b/Docker/jenkins/Jenkins2/Dockerfile @@ -5,7 +5,7 @@ USER root ENV DEBIAN_FRONTEND=noninteractive # install 
python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ @@ -25,7 +25,6 @@ RUN set -xe && apt-get update \ libbz2-dev \ libexpat1-dev \ liblzma-dev \ - python-virtualenv \ lua5.3 \ r-base \ software-properties-common \ @@ -60,7 +59,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_12.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: https://www.postgresql.org/download/linux/debian/ @@ -86,9 +85,7 @@ RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. 
-RUN set -xe && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade - -RUN curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python3 - +RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade # install chrome (supports headless mode) RUN set -xe \ @@ -117,7 +114,7 @@ RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ # add our custom start script COPY jenkins2.sh /opt/cdis/bin/jenkins2.sh RUN chmod -R a+rx /opt/cdis -ENTRYPOINT ["/sbin/tini", "--", "/opt/cdis/bin/jenkins2.sh"] +ENTRYPOINT ["/usr/bin/tini", "--", "/opt/cdis/bin/jenkins2.sh"] USER jenkins diff --git a/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml b/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml deleted file mode 100755 index 922711ad1..000000000 --- a/Docker/jenkins/Jenkins2/jenkins-master-deployment.yaml +++ /dev/null @@ -1,355 +0,0 @@ -{{- if .Capabilities.APIVersions.Has "apps/v1" }} -apiVersion: apps/v1 -{{- else }} -apiVersion: apps/v1 -{{- end }} -kind: Deployment -metadata: - name: {{ template "jenkins.fullname" . }} - namespace: {{ template "jenkins.namespace" . 
}} - labels: - "app.kubernetes.io/name": '{{ template "jenkins.name" .}}' - "helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version }}" - "app.kubernetes.io/managed-by": "{{ .Release.Service }}" - "app.kubernetes.io/instance": "{{ .Release.Name }}" - "app.kubernetes.io/component": "{{ .Values.master.componentName }}" - {{- range $key, $val := .Values.master.deploymentLabels }} - {{ $key }}: {{ $val | quote }} - {{- end}} -spec: - replicas: 1 - strategy: - type: {{ if .Values.persistence.enabled }}Recreate{{ else }}RollingUpdate - rollingUpdate: -{{ toYaml .Values.master.rollingUpdate | indent 6 }} - {{- end }} - selector: - matchLabels: - "app.kubernetes.io/component": "{{ .Values.master.componentName }}" - "app.kubernetes.io/instance": "{{ .Release.Name }}" - template: - metadata: - labels: - "app.kubernetes.io/name": '{{ template "jenkins.name" .}}' - "helm.sh/chart": "{{ .Chart.Name }}-{{ .Chart.Version }}" - "app.kubernetes.io/managed-by": "{{ .Release.Service }}" - "app.kubernetes.io/instance": "{{ .Release.Name }}" - "app.kubernetes.io/component": "{{ .Values.master.componentName }}" - {{- range $key, $val := .Values.master.podLabels }} - {{ $key }}: {{ $val | quote }} - {{- end}} - annotations: - {{- if .Values.master.podAnnotations }} -{{ toYaml .Values.master.podAnnotations | indent 8 }} - {{- end }} - spec: - {{- if .Values.master.nodeSelector }} - nodeSelector: -{{ toYaml .Values.master.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.master.tolerations }} - tolerations: -{{ toYaml .Values.master.tolerations | indent 8 }} - {{- end }} - {{- if .Values.master.affinity }} - affinity: -{{ toYaml .Values.master.affinity | indent 8 }} - {{- end }} - {{- if and (.Capabilities.APIVersions.Has "scheduling.k8s.io/v1beta1") (.Values.master.priorityClassName) }} - priorityClassName: {{ .Values.master.priorityClassName }} - {{- end }} -{{- if .Values.master.usePodSecurityContext }} - securityContext: - runAsUser: {{ default 0 .Values.master.runAsUser }} -{{- 
if and (.Values.master.runAsUser) (.Values.master.fsGroup) }} -{{- if not (eq .Values.master.runAsUser 0.0) }} - fsGroup: {{ .Values.master.fsGroup }} -{{- end }} -{{- end }} -{{- end }} - serviceAccountName: "{{ template "jenkins.serviceAccountName" . }}" -{{- if .Values.master.hostNetworking }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet -{{- end }} - {{- if .Values.master.hostAliases }} - hostAliases: - {{- toYaml .Values.master.hostAliases | nindent 8 }} - {{- end }} - initContainers: -{{- if .Values.master.customInitContainers }} -{{ tpl (toYaml .Values.master.customInitContainers) . | indent 8 }} -{{- end }} - - name: "copy-default-config" -{{- if .Values.master.imageTag }} - image: "{{ .Values.master.image }}:{{ .Values.master.imageTag }}" -{{- else }} - image: "{{ .Values.master.image }}:{{ .Values.master.tag }}" -{{- end }} - imagePullPolicy: "{{ .Values.master.imagePullPolicy }}" - command: ["sh", "/var/jenkins_config/apply_config.sh"] - env: - {{- if .Values.master.useSecurity }} - - name: ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . }} - key: jenkins-admin-password - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . 
}} - key: jenkins-admin-user - {{- end }} - {{- if .Values.master.initContainerEnv }} -{{ toYaml .Values.master.initContainerEnv | indent 12 }} - {{- end }} - resources: -{{ toYaml .Values.master.resources | indent 12 }} - volumeMounts: - - mountPath: /tmp - name: tmp - - mountPath: /var/jenkins_home - name: jenkins-home - {{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} - - mountPath: /var/jenkins_config - name: jenkins-config - {{- if .Values.master.enableXmlConfig }} - {{- if .Values.master.credentialsXmlSecret }} - - mountPath: /var/jenkins_credentials - name: jenkins-credentials - readOnly: true - {{- end }} - {{- if .Values.master.jobs }} - - mountPath: /var/jenkins_jobs - name: jenkins-jobs - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/secrets/ - name: secrets-dir - {{- end }} - {{- if .Values.master.secretsFilesSecret }} - - mountPath: /var/jenkins_secrets - name: jenkins-secrets - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/plugins - name: plugins - - mountPath: /var/jenkins_plugins - name: plugin-dir - containers: - - name: jenkins -{{- if .Values.master.imageTag }} - image: "{{ .Values.master.image }}:{{ .Values.master.imageTag }}" -{{- else }} - image: "{{ .Values.master.image }}:{{ .Values.master.tag }}" -{{- end }} - imagePullPolicy: "{{ .Values.master.imagePullPolicy }}" - {{- if .Values.master.useSecurity }} - command: -{{ toYaml .Values.master.command | indent 10 }} - args: -{{ toYaml .Values.master.args | indent 10 }} - {{- end }} - {{- if .Values.master.lifecycle }} - lifecycle: -{{ toYaml .Values.master.lifecycle | indent 12 }} - {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name -{{ toYaml .Values.master.additionalEnv | indent 12 }} - - name: JAVA_OPTS - value: > - {{ default "" .Values.master.javaOpts }} - {{- if .Values.master.sidecars.configAutoReload.enabled }} -Dcasc.reload.token=$(POD_NAME) {{end}} - - name: 
JENKINS_OPTS - value: "{{ if .Values.master.jenkinsUriPrefix }}--prefix={{ .Values.master.jenkinsUriPrefix }} {{ end }}{{ default "" .Values.master.jenkinsOpts}}" - - name: JENKINS_SLAVE_AGENT_PORT - value: "{{ .Values.master.slaveListenerPort }}" - {{- if .Values.master.useSecurity }} - - name: ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . }} - key: jenkins-admin-password - - name: ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ template "jenkins.fullname" . }} - key: jenkins-admin-user - {{- end }} - {{- if .Values.master.containerEnv }} -{{ toYaml .Values.master.containerEnv | indent 12 }} - {{- end }} - {{- if .Values.master.JCasC.enabled }} - - name: CASC_JENKINS_CONFIG - value: {{ .Values.master.sidecars.configAutoReload.folder | default "/var/jenkins_home/casc_configs" | quote }} - {{- end }} - ports: - - containerPort: 8080 - name: http - - containerPort: {{ .Values.master.slaveListenerPort }} - name: slavelistener - {{- if .Values.master.slaveHostPort }} - hostPort: {{ .Values.master.slaveHostPort }} - {{- end }} - {{- if .Values.master.jmxPort }} - - containerPort: {{ .Values.master.jmxPort }} - name: jmx - {{- end }} -{{- range $index, $port := .Values.master.extraPorts }} - - containerPort: {{ $port.port }} - name: {{ $port.name }} -{{- end }} -{{- if .Values.master.healthProbes }} - livenessProbe: - httpGet: - path: "{{ default "" .Values.master.jenkinsUriPrefix }}/login" - port: http - initialDelaySeconds: {{ .Values.master.healthProbeLivenessInitialDelay }} - periodSeconds: {{ .Values.master.healthProbeLivenessPeriodSeconds }} - timeoutSeconds: {{ .Values.master.healthProbesLivenessTimeout }} - failureThreshold: {{ .Values.master.healthProbeLivenessFailureThreshold }} - readinessProbe: - httpGet: - path: "{{ default "" .Values.master.jenkinsUriPrefix }}/login" - port: http - initialDelaySeconds: {{ .Values.master.healthProbeReadinessInitialDelay }} - periodSeconds: {{ 
.Values.master.healthProbeReadinessPeriodSeconds }} - timeoutSeconds: {{ .Values.master.healthProbesReadinessTimeout }} - failureThreshold: {{ .Values.master.healthProbeReadinessFailureThreshold }} -{{- end }} - - resources: -{{ toYaml .Values.master.resources | indent 12 }} - volumeMounts: -{{- if .Values.persistence.mounts }} -{{ toYaml .Values.persistence.mounts | indent 12 }} -{{- end }} - - mountPath: /tmp - name: tmp - - mountPath: /var/jenkins_home - name: jenkins-home - readOnly: false - {{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} - - mountPath: /var/jenkins_config - name: jenkins-config - readOnly: true - {{- if .Values.master.enableXmlConfig }} - {{- if .Values.master.credentialsXmlSecret }} - - mountPath: /var/jenkins_credentials - name: jenkins-credentials - readOnly: true - {{- end }} - {{- if .Values.master.jobs }} - - mountPath: /var/jenkins_jobs - name: jenkins-jobs - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/secrets/ - name: secrets-dir - readOnly: false - {{- end }} - {{- if or .Values.master.secretsFilesSecret }} - - mountPath: /var/jenkins_secrets - name: jenkins-secrets - readOnly: true - {{- end }} - - mountPath: /usr/share/jenkins/ref/plugins/ - name: plugin-dir - readOnly: false - {{- if and (.Values.master.JCasC.enabled) (.Values.master.sidecars.configAutoReload.enabled) }} - - name: sc-config-volume - mountPath: {{ .Values.master.sidecars.configAutoReload.folder | default "/var/jenkins_home/casc_configs" | quote }} - {{- end }} - -{{- if and (.Values.master.JCasC.enabled) (.Values.master.sidecars.configAutoReload.enabled) }} - - name: jenkins-sc-config - image: "{{ .Values.master.sidecars.configAutoReload.image }}" - imagePullPolicy: {{ .Values.master.sidecars.configAutoReload.imagePullPolicy }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: LABEL - value: "{{ template "jenkins.fullname" . 
}}-jenkins-config" - - name: FOLDER - value: "{{ .Values.master.sidecars.configAutoReload.folder }}" - - name: NAMESPACE - value: "{{ .Values.master.sidecars.configAutoReload.searchNamespace | default .Release.Namespace }}" - - name: REQ_URL - value: "http://localhost:8080/reload-configuration-as-code/?casc-reload-token=$(POD_NAME)" - - name: REQ_METHOD - value: "POST" - resources: -{{ toYaml .Values.master.sidecars.configAutoReload.resources | indent 12 }} - volumeMounts: - - name: sc-config-volume - mountPath: {{ .Values.master.sidecars.configAutoReload.folder | quote }} - - name: jenkins-home - mountPath: /var/jenkins_home - {{- if .Values.persistence.subPath }} - subPath: {{ .Values.persistence.subPath }} - {{- end }} -{{- end}} - - -{{- if .Values.master.sidecars.other}} -{{ tpl (toYaml .Values.master.sidecars.other | indent 8) .}} -{{- end }} - - volumes: -{{- if .Values.persistence.volumes }} -{{ tpl (toYaml .Values.persistence.volumes | indent 6) . }} -{{- end }} - - name: plugins - emptyDir: {} - - name: tmp - emptyDir: {} - - name: jenkins-config - configMap: - name: {{ template "jenkins.fullname" . }} - {{- if .Values.master.enableXmlConfig }} - {{- if .Values.master.credentialsXmlSecret }} - - name: jenkins-credentials - secret: - secretName: {{ .Values.master.credentialsXmlSecret }} - {{- end }} - {{- if .Values.master.jobs }} - - name: jenkins-jobs - configMap: - name: {{ template "jenkins.fullname" . }}-jobs - {{- end }} - - name: secrets-dir - emptyDir: {} - {{- end }} - {{- if .Values.master.secretsFilesSecret }} - - name: jenkins-secrets - secret: - secretName: {{ .Values.master.secretsFilesSecret }} - {{- end }} - - name: plugin-dir - emptyDir: {} - - name: jenkins-home - {{- if .Values.persistence.enabled }} - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim | default (include "jenkins.fullname" .) 
}} - {{- else }} - emptyDir: {} - {{- end -}} - {{- if .Values.master.JCasC.enabled }} - - name: sc-config-volume - emptyDir: {} - {{- end }} -{{- if .Values.master.imagePullSecretName }} - imagePullSecrets: - - name: {{ .Values.master.imagePullSecretName }} -{{- end -}} diff --git a/Docker/jenkins/Jenkins2/jenkins.values b/Docker/jenkins/Jenkins2/jenkins.values deleted file mode 100644 index 404b59b49..000000000 --- a/Docker/jenkins/Jenkins2/jenkins.values +++ /dev/null @@ -1,39 +0,0 @@ -master: - # Used for label app.kubernetes.io/component - componentName: "k8s-jenkins-master-deployment" - serviceType: NodePort - NodePort: 32323 - adminUser: "admin" - # adminPassword: "" - - image: "quay.io/cdis/k8s-jenkins-master" - tag: "latest" - - installPlugins: false - - podLabels: - app: jenkins - - additionalEnv: - - name: AWS_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: jenkins-secret - key: aws_access_key_id - - name: AWS_SECRET_ACCESS_KEY - valueFrom: - secretKeyRef: - name: jenkins-secret - key: aws_secret_access_key - - command: - - /sbin/tini - args: - - -- - - /opt/cdis/bin/jenkins2.sh - -rbac: - create: true -persistence: - size: "200Gi" - diff --git a/Docker/jenkins/Jenkins2/jenkins2.sh b/Docker/jenkins/Jenkins2/jenkins2.sh index c0fb0e4ea..fe4c53329 100644 --- a/Docker/jenkins/Jenkins2/jenkins2.sh +++ b/Docker/jenkins/Jenkins2/jenkins2.sh @@ -16,14 +16,25 @@ if [ -z "$JENKINS_S3_PATH" ]; then JENKINS_S3_PATH="s3://cdis-terraform-state/Jenkins2Backup" fi +# # Setup ~/.aws to support cloud-automation/gen3 +# terraform stuff wants a profile to query +# mkdir -p ~/.aws cat - > ~/.aws/config < ~/.aws/credentials < Date: Mon, 21 Nov 2022 10:52:06 -0800 Subject: [PATCH 018/362] Fix name of workflow (#2083) * Fix name of workflow --- .github/workflows/image_build_push_jenkins.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml index 
d08ac737d..ffea50ace 100644 --- a/.github/workflows/image_build_push_jenkins.yaml +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -1,4 +1,4 @@ -name: Build Python Base Images and Push to Quay and ECR +name: Build Jenkins images and push to Quay on: push: From a985b2a1afe0a25e2e5ab25ba279403837ae2229 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 28 Nov 2022 08:40:42 -0600 Subject: [PATCH 019/362] Fix apiversion of cronjobs and fix cluster-autoscaler (#2082) --- gen3/bin/job.sh | 2 +- gen3/bin/kube-setup-networkpolicy.sh | 2 +- .../cluster-autoscaler-autodiscover.yaml | 14 ++++++++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/gen3/bin/job.sh b/gen3/bin/job.sh index 4a1c03542..09d305957 100644 --- a/gen3/bin/job.sh +++ b/gen3/bin/job.sh @@ -60,7 +60,7 @@ g3k_job2cronjson(){ local cronScript="$(cat - < Date: Tue, 29 Nov 2022 11:26:38 -0600 Subject: [PATCH 020/362] Add addtional label to opencost report (#2085) VADC-328 --- kube/services/jobs/opencost-report-argo-job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml index 9b8809cd7..0f31eca40 100644 --- a/kube/services/jobs/opencost-report-argo-job.yaml +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -49,7 +49,7 @@ spec: proto-opencost-reporter GetAllocationReport \ --from_days_before 9 \ --to_days_before 1 \ - --aggregate_by label:gen3username \ + --aggregate_by label:gen3username label:workflows.argoproj.io/workflow \ --filter_namespaces argo \ --share_idle_by_node - restartPolicy: Never \ No newline at end of file + restartPolicy: Never From 89a983210031d5a4f4db2e318033c7f2702017e1 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 29 Nov 2022 11:53:37 -0700 Subject: [PATCH 021/362] Hiding the revproxy-service-elb behind a flag in the manifest.json 
(#2079) * Hiding the revproxy-service-elb behind a flag in the manifest.json * updating the documentation to explain the shift from ELB to ALB and difference in WAF defployments. Co-authored-by: J. Q <55899496+jawadqur@users.noreply.github.com> --- .secrets.baseline | 6 +++--- doc/kube-setup-ingress.md | 4 +++- doc/kube-setup-revproxy.md | 3 ++- gen3/bin/kube-setup-revproxy.sh | 15 ++++++++++++++- kube/services/revproxy/README.md | 2 ++ 5 files changed, 24 insertions(+), 6 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index e087e9243..8d7d9afb8 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -426,21 +426,21 @@ "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", "is_secret": false, "is_verified": false, - "line_number": 32, + "line_number": 38, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", "is_secret": false, "is_verified": false, - "line_number": 49, + "line_number": 55, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", "is_secret": false, "is_verified": false, - "line_number": 51, + "line_number": 57, "type": "Secret Keyword" } ], diff --git a/doc/kube-setup-ingress.md b/doc/kube-setup-ingress.md index 15b2bd39e..bd4dff29c 100644 --- a/doc/kube-setup-ingress.md +++ b/doc/kube-setup-ingress.md @@ -2,7 +2,9 @@ Setup the aws-load-balancer-controller and an ALB. -This is a replacement for the revproxy-service-elb +This is a replacement for the revproxy-service-elb and WAF + +PLEASE NOTE: This script will now also deploy AWS WAF which will be associated with the ALB. This can be deployed by setting/adding the "waf_enabled" flag to true in the manifest-global configmap (set via the global section of the manifest.json). 
## Overview diff --git a/doc/kube-setup-revproxy.md b/doc/kube-setup-revproxy.md index 365d0b129..5c483e12f 100644 --- a/doc/kube-setup-revproxy.md +++ b/doc/kube-setup-revproxy.md @@ -5,6 +5,7 @@ Configure and launch the reverse proxy. ## References * the reverse proxy [readme](../kube/services/revproxy/README.md) has more details. -* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md). +* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md). (This is only deployed if the "deploy_elb" flag is set to true in the manifest-global configmap (set/added via the global section of the manifest.json).deploy the revproxy-ELB-service and WAF) +* Please see https://github.com/uc-cdis/cloud-automation/blob/master/doc/kube-setup-ingress.md as AWS WAF and ALB is recommended. * [maintenance mode](./maintenance.md) * the [ip blacklist](../gen3/lib/manifestDefaults/revproxy/) may be configured with a custom `manifests/revproxy/blacklist.conf` diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 9e38fb908..0b6ee74d7 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -13,6 +13,12 @@ set -e source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" +gen3_load "gen3/lib/g3k_manifest" + +# Deploy ELB Service if flag set in manifest +manifestPath=$(g3k_manifest_path) +deployELB="$(jq -r ".[\"global\"][\"deploy_elb\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" + # # Setup indexd basic-auth gateway user creds enforced @@ -255,6 +261,9 @@ export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_a # revproxy deployment using http proxy protocol. # # port 81 == proxy-protocol listener - main service entry + +gen3_deploy_revproxy_elb() { +gen3_log_info "Deploying revproxy-service-elb..." 
export TARGET_PORT_HTTPS=81 # port 82 == proxy-protocol listener - redirects to https export TARGET_PORT_HTTP=82 @@ -280,6 +289,10 @@ else envsubst <$scriptDir/revproxy-service-elb.yaml gen3_log_info "DRY RUN" fi - +} # Don't automatically apply this right now #kubectl apply -f $scriptDir/revproxy-service.yaml + +if [ "$deployELB" = true ]; then + gen3_deploy_revproxy_elb +fi diff --git a/kube/services/revproxy/README.md b/kube/services/revproxy/README.md index 4cec90df6..8940687d4 100644 --- a/kube/services/revproxy/README.md +++ b/kube/services/revproxy/README.md @@ -10,6 +10,8 @@ as an AWS ELB that terminates HTTPS requests (using an AWS Certificate Manager s forwards http and https traffic to the revproxy deployment using http proxy protocol. +Update: The revproxy-service-elb and WAF is now only applied if you set/add the "waf_enabled" flag to true in the manifest-global configmap (set via the global section of the manifest.json). We now recommend using the ALB Ingress via the kube-setup-ingress script detailed here: https://github.com/uc-cdis/cloud-automation/blob/master/doc/kube-setup-ingress.md + - Create a cert in AWS Certificate Manager, and register it in the global config map. 
This will require the admin for the domain approve it through email - `gen3 kube-setup-revproxy` - deploys the service - creating an AWS ELB - update DNS to point at the ELB From b24407c42a50a9c2b02851a8a98acb155353ed76 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 30 Nov 2022 11:37:43 -0800 Subject: [PATCH 022/362] use node 14 for CI (#2087) --- Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 08d047e52..afb1fca9f 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -72,7 +72,7 @@ RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && chmod a+rx /usr/local/bin/docker-compose # install nodejs -RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - +RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - RUN apt-get update && apt-get install -y nodejs # add psql: https://www.postgresql.org/download/linux/debian/ From 7b8072c446db41aed07d471abfed04ee0e3463db Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Wed, 30 Nov 2022 17:48:11 -0600 Subject: [PATCH 023/362] PXP-10268 Add 'fence-delete-expired-clients' job and cronjob (#2075) --- gen3/bin/kube-setup-fence.sh | 8 +++ .../fence-delete-expired-clients-job.yaml | 61 +++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 kube/services/jobs/fence-delete-expired-clients-job.yaml diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index 192000b8f..f69f80066 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -90,3 +90,11 @@ if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then gen3 job cron fence-visa-update "30 * * * *" fi fi + +# add cronjob for removing expired OIDC clients for required fence versions +if isServiceVersionGreaterOrEqual 
"fence" "6.2.0" "2023.01"; then + if ! g3kubectl get cronjob fence-delete-expired-clients >/dev/null 2>&1; then + echo "fence-delete-expired-clients being added as a cronjob b/c fence >= 6.2.0 or 2023.01" + gen3 job cron fence-delete-expired-clients "0 7 * * *" + fi +fi diff --git a/kube/services/jobs/fence-delete-expired-clients-job.yaml b/kube/services/jobs/fence-delete-expired-clients-job.yaml new file mode 100644 index 000000000..bac613404 --- /dev/null +++ b/kube/services/jobs/fence-delete-expired-clients-job.yaml @@ -0,0 +1,61 @@ +# Delete all expired Fence OIDC clients and optionally post about expired clients on Slack. +# To set up as a daily cronjob: `gen3 job cron fence-delete-expired-clients "0 7 * * *"` +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: fence-delete-expired-clients +spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: useryaml-job + volumes: + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + - name: config-volume + secret: + secretName: "fence-config" + containers: + - name: fence + GEN3_FENCE_IMAGE + imagePullPolicy: Always + env: + - name: PYTHONPATH + value: /var/www/fence + - name: FENCE_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-fence + key: fence-config-public.yaml + optional: true + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + volumeMounts: + - name: "config-volume" + readOnly: true + mountPath: "/var/www/fence/fence-config-secret.yaml" + subPath: fence-config.yaml + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + subPath: yaml_merge.py + command: ["/bin/bash"] + args: + - "-c" + - | + echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" + python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml + if [[ "$slackWebHook" =~ ^http ]]; then + fence-create 
client-delete-expired --slack-webhook $slackWebHook --warning-days 7 + else + fence-create client-delete-expired + fi + exit $? + restartPolicy: Never From aeb0b85eb492409e42bb9e3d8dcc22e26f94b419 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Mon, 5 Dec 2022 14:17:48 -0600 Subject: [PATCH 024/362] update datadog helm chart version (#2092) --- gen3/bin/kube-setup-datadog.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh index 89f007b09..76019dff9 100644 --- a/gen3/bin/kube-setup-datadog.sh +++ b/gen3/bin/kube-setup-datadog.sh @@ -44,7 +44,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 2.33.8 2> >(grep -v 'This is insecure' >&2) + helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.1.9 2> >(grep -v 'This is insecure' >&2) ) else gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" From 54fe92826f489b13915552e517885ab1144f1902 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Mon, 5 Dec 2022 15:46:10 -0600 Subject: [PATCH 025/362] feat: add argo-wrapper, cohort-middleware and OHDSI tools to roll all (#2084) --- gen3/bin/kube-roll-all.sh | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 8b1abe88a..d93ac7600 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -314,6 +314,24 @@ else gen3_log_info "not deploying kayako-wrapper - no manifest entry for '.versions[\"kayako-wrapper\"]'" fi +if g3k_manifest_lookup '.versions["argo-wrapper"]' 2> /dev/null; then + gen3 
kube-setup-argo-wrapper & +else + gen3_log_info "not deploying argo-wrapper - no manifest entry for '.versions[\"argo-wrapper\"]'" +fi + +if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then + gen3 roll cohort-middleware & +else + gen3_log_info "not deploying cohort-middleware - no manifest entry for '.versions[\"cohort-middleware\"]'" +fi + +if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then + gen3 kube-setup-ohdsi & +else + gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'" +fi + gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true & From 0052121ddf38943e2553de4e5b60c01f74d4fcc5 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 8 Dec 2022 13:14:30 -0700 Subject: [PATCH 026/362] Feat/gpe 627 (#2090) * adding a script to install and setup the ui for argocd in argocd-gpe-672 namespace. * adding an nginx conf to allow for argocd to be accessed on the same ALB. had to edit the base_url variable in the argo manifest to get the page to load properly * updating file name * needed to edit the kube-setup-revproxy script so it can add argocd as one of the services to grab an nginx configuration file for * changed the namespace to argocd and cleaned up the setup script to output the username and password. 
Also made some logic to check if argo is already deployed * fixing argocd security * updating typo * fixing the username output --- gen3/bin/kube-setup-argocd.sh | 18 + gen3/bin/kube-setup-revproxy.sh | 8 + kube/services/argocd/install.yaml | 11116 ++++++++++++++++ .../gen3.nginx.conf/argocd-server.conf | 20 + 4 files changed, 11162 insertions(+) create mode 100644 gen3/bin/kube-setup-argocd.sh create mode 100644 kube/services/argocd/install.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/argocd-server.conf diff --git a/gen3/bin/kube-setup-argocd.sh b/gen3/bin/kube-setup-argocd.sh new file mode 100644 index 000000000..a2eb44e00 --- /dev/null +++ b/gen3/bin/kube-setup-argocd.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Deploy the argocd +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +if g3kubectl get namespace argocd > /dev/null 2>&1; +then + gen3_log_info "ArgoCD is already deployed. Skipping..." +else + kubectl create namespace argocd + kubectl apply -f "${GEN3_HOME}/kube/services/argocd/install.yaml" -n argocd + gen3 kube-setup-revproxy + export argocdsecret=`kubectl get secret argocd-initial-admin-secret -n argocd -o json | jq .data.password -r | base64 -d` # pragma: allowlist secret + gen3_log_info "You can now access the ArgoCD endpoint with the following credentials: Username= admin and Password= $argocdsecret" +fi \ No newline at end of file diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 0b6ee74d7..02fcc5c38 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -122,6 +122,14 @@ then done fi +if g3kubectl get namespace argocd > /dev/null 2>&1; +then + filePath="$scriptDir/gen3.nginx.conf/argocd-server.conf" + if [[ -f "$filePath" ]]; then + confFileList+=("--from-file" "$filePath") + fi +fi + if [[ $current_namespace == "default" ]]; then if g3kubectl get namespace prometheus > /dev/null 2>&1; diff --git a/kube/services/argocd/install.yaml 
b/kube/services/argocd/install.yaml new file mode 100644 index 000000000..10f6477c7 --- /dev/null +++ b/kube/services/argocd/install.yaml @@ -0,0 +1,11116 @@ +# This is an auto-generated file. DO NOT EDIT +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: applications.argoproj.io + app.kubernetes.io/part-of: argocd + name: applications.argoproj.io +spec: + group: argoproj.io + names: + kind: Application + listKind: ApplicationList + plural: applications + shortNames: + - app + - apps + singular: application + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.sync.status + name: Sync Status + type: string + - jsonPath: .status.health.status + name: Health Status + type: string + - jsonPath: .status.sync.revision + name: Revision + priority: 10 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: Application is a definition of Application resource. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + operation: + description: Operation contains information about a requested or running + operation + properties: + info: + description: Info is a list of informational items for this operation + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + initiatedBy: + description: InitiatedBy contains information about who initiated + the operations + properties: + automated: + description: Automated is set to true if operation was initiated + automatically by the application controller. + type: boolean + username: + description: Username contains the name of a user who started + operation + type: string + type: object + retry: + description: Retry controls the strategy to apply if a sync fails + properties: + backoff: + description: Backoff controls how to backoff on subsequent retries + of failed syncs + properties: + duration: + description: Duration is the amount to back off. Default unit + is seconds, but could also be a duration (e.g. "2m", "1h") + type: string + factor: + description: Factor is a factor to multiply the base duration + after each failed retry + format: int64 + type: integer + maxDuration: + description: MaxDuration is the maximum amount of time allowed + for the backoff strategy + type: string + type: object + limit: + description: Limit is the maximum number of attempts for retrying + a failed sync. If set to 0, no retries will be performed. 
+ format: int64 + type: integer + type: object + sync: + description: Sync contains parameters for the operation + properties: + dryRun: + description: DryRun specifies to perform a `kubectl apply --dry-run` + without actually performing the sync + type: boolean + manifests: + description: Manifests is an optional field that overrides sync + source with a local directory for development + items: + type: string + type: array + prune: + description: Prune specifies to delete resources from the cluster + that are no longer tracked in git + type: boolean + resources: + description: Resources describes which resources shall be part + of the sync + items: + description: SyncOperationResource contains resources to sync. + properties: + group: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + type: array + revision: + description: Revision is the revision (Git) or chart version (Helm) + which to sync the application to If omitted, will use the revision + specified in app spec. + type: string + source: + description: Source overrides the source definition set in the + application. This is typically set in a Rollback operation and + is nil during a Sync operation + properties: + chart: + description: Chart is a Helm chart name, and must be specified + for applications sourced from a Helm repo. 
+ type: string + directory: + description: Directory holds path/directory specific options + properties: + exclude: + description: Exclude contains a glob pattern to match + paths against that should be explicitly excluded from + being used during manifest generation + type: string + include: + description: Include contains a glob pattern to match + paths against that should be explicitly included during + manifest generation + type: string + jsonnet: + description: Jsonnet holds options specific to Jsonnet + properties: + extVars: + description: ExtVars is a list of Jsonnet External + Variables + items: + description: JsonnetVar represents a variable to + be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + description: Additional library search dirs + items: + type: string + type: array + tlas: + description: TLAS is a list of Jsonnet Top-level Arguments + items: + description: JsonnetVar represents a variable to + be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + description: Recurse specifies whether to scan a directory + recursively for manifests + type: boolean + type: object + helm: + description: Helm holds helm specific options + properties: + fileParameters: + description: FileParameters are file parameters to the + helm template + items: + description: HelmFileParameter is a file parameter that's + passed to helm template during manifest generation + properties: + name: + description: Name is the name of the Helm parameter + type: string + path: + description: Path is the path to the file containing + the values for the Helm parameter + type: string + type: object + type: array + ignoreMissingValueFiles: + description: 
IgnoreMissingValueFiles prevents helm template + from failing when valueFiles do not exist locally by + not appending them to helm template --values + type: boolean + parameters: + description: Parameters is a list of Helm parameters which + are passed to the helm template command upon manifest + generation + items: + description: HelmParameter is a parameter that's passed + to helm template during manifest generation + properties: + forceString: + description: ForceString determines whether to tell + Helm to interpret booleans and numbers as strings + type: boolean + name: + description: Name is the name of the Helm parameter + type: string + value: + description: Value is the value for the Helm parameter + type: string + type: object + type: array + passCredentials: + description: PassCredentials pass credentials to all domains + (Helm's --pass-credentials) + type: boolean + releaseName: + description: ReleaseName is the Helm release name to use. + If omitted it will use the application name + type: string + skipCrds: + description: SkipCrds skips custom resource definition + installation step (Helm's --skip-crds) + type: boolean + valueFiles: + description: ValuesFiles is a list of Helm value files + to use when generating a template + items: + type: string + type: array + values: + description: Values specifies Helm values to be passed + to helm template, typically defined as a block + type: string + version: + description: Version is the Helm version to use for templating + ("3") + type: string + type: object + kustomize: + description: Kustomize holds kustomize specific options + properties: + commonAnnotations: + additionalProperties: + type: string + description: CommonAnnotations is a list of additional + annotations to add to rendered manifests + type: object + commonLabels: + additionalProperties: + type: string + description: CommonLabels is a list of additional labels + to add to rendered manifests + type: object + forceCommonAnnotations: + 
description: ForceCommonAnnotations specifies whether + to force applying common annotations to resources for + Kustomize apps + type: boolean + forceCommonLabels: + description: ForceCommonLabels specifies whether to force + applying common labels to resources for Kustomize apps + type: boolean + images: + description: Images is a list of Kustomize image override + specifications + items: + description: KustomizeImage represents a Kustomize image + definition in the format [old_image_name=]: + type: string + type: array + namePrefix: + description: NamePrefix is a prefix appended to resources + for Kustomize apps + type: string + nameSuffix: + description: NameSuffix is a suffix appended to resources + for Kustomize apps + type: string + version: + description: Version controls which version of Kustomize + to use for rendering manifests + type: string + type: object + path: + description: Path is a directory path within the Git repository, + and is only valid for applications sourced from Git. + type: string + plugin: + description: Plugin holds config management plugin specific + options + properties: + env: + description: Env is a list of environment variable entries + items: + description: EnvEntry represents an entry in the application's + environment + properties: + name: + description: Name is the name of the variable, usually + expressed in uppercase + type: string + value: + description: Value is the value of the variable + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + description: RepoURL is the URL to the repository (Git or + Helm) that contains the application manifests + type: string + targetRevision: + description: TargetRevision defines the revision of the source + to sync the application to. In case of Git, this can be + commit, tag, or branch. If omitted, will equal to HEAD. + In case of Helm, this is a semver tag for the Chart's version. 
+ type: string + required: + - repoURL + type: object + syncOptions: + description: SyncOptions provide per-sync sync-options, e.g. Validate=false + items: + type: string + type: array + syncStrategy: + description: SyncStrategy describes how to perform the sync + properties: + apply: + description: Apply will perform a `kubectl apply` to perform + the sync. + properties: + force: + description: Force indicates whether or not to supply + the --force flag to `kubectl apply`. The --force flag + deletes and re-create the resource, when PATCH encounters + conflict and has retried for 5 times. + type: boolean + type: object + hook: + description: Hook will submit any referenced resources to + perform the sync. This is the default strategy + properties: + force: + description: Force indicates whether or not to supply + the --force flag to `kubectl apply`. The --force flag + deletes and re-create the resource, when PATCH encounters + conflict and has retried for 5 times. + type: boolean + type: object + type: object + type: object + type: object + spec: + description: ApplicationSpec represents desired application state. Contains + link to repository with application definition and additional parameters + link definition revision. + properties: + destination: + description: Destination is a reference to the target Kubernetes server + and namespace + properties: + name: + description: Name is an alternate way of specifying the target + cluster by its symbolic name + type: string + namespace: + description: Namespace specifies the target namespace for the + application's resources. 
The namespace will only be set for + namespace-scoped resources that have not set a value for .metadata.namespace + type: string + server: + description: Server specifies the URL of the target cluster and + must be set to the Kubernetes control plane API + type: string + type: object + ignoreDifferences: + description: IgnoreDifferences is a list of resources and their fields + which should be ignored during comparison + items: + description: ResourceIgnoreDifferences contains resource filter + and list of json paths which should be ignored during comparison + with live state. + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + description: ManagedFieldsManagers is a list of trusted managers. + Fields mutated by those managers will take precedence over + the desired state defined in the SCM and won't be displayed + in diffs + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + description: Info contains a list of information (URLs, email addresses, + and plain text) that relates to the application + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + description: Project is a reference to the project this application + belongs to. The empty string means that application belongs to the + 'default' project. + type: string + revisionHistoryLimit: + description: RevisionHistoryLimit limits the number of items kept + in the application's revision history, which is used for informational + purposes as well as for rollbacks to previous versions. This should + only be changed in exceptional circumstances. Setting to zero will + store no history. This will reduce storage used. 
Increasing will + increase the space used to store the history, so we do not recommend + increasing it. Default is 10. + format: int64 + type: integer + source: + description: Source is a reference to the location of the application's + manifests or chart + properties: + chart: + description: Chart is a Helm chart name, and must be specified + for applications sourced from a Helm repo. + type: string + directory: + description: Directory holds path/directory specific options + properties: + exclude: + description: Exclude contains a glob pattern to match paths + against that should be explicitly excluded from being used + during manifest generation + type: string + include: + description: Include contains a glob pattern to match paths + against that should be explicitly included during manifest + generation + type: string + jsonnet: + description: Jsonnet holds options specific to Jsonnet + properties: + extVars: + description: ExtVars is a list of Jsonnet External Variables + items: + description: JsonnetVar represents a variable to be + passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + description: Additional library search dirs + items: + type: string + type: array + tlas: + description: TLAS is a list of Jsonnet Top-level Arguments + items: + description: JsonnetVar represents a variable to be + passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + description: Recurse specifies whether to scan a directory + recursively for manifests + type: boolean + type: object + helm: + description: Helm holds helm specific options + properties: + fileParameters: + description: FileParameters are file parameters to the helm + template + items: + description: 
HelmFileParameter is a file parameter that's + passed to helm template during manifest generation + properties: + name: + description: Name is the name of the Helm parameter + type: string + path: + description: Path is the path to the file containing + the values for the Helm parameter + type: string + type: object + type: array + ignoreMissingValueFiles: + description: IgnoreMissingValueFiles prevents helm template + from failing when valueFiles do not exist locally by not + appending them to helm template --values + type: boolean + parameters: + description: Parameters is a list of Helm parameters which + are passed to the helm template command upon manifest generation + items: + description: HelmParameter is a parameter that's passed + to helm template during manifest generation + properties: + forceString: + description: ForceString determines whether to tell + Helm to interpret booleans and numbers as strings + type: boolean + name: + description: Name is the name of the Helm parameter + type: string + value: + description: Value is the value for the Helm parameter + type: string + type: object + type: array + passCredentials: + description: PassCredentials pass credentials to all domains + (Helm's --pass-credentials) + type: boolean + releaseName: + description: ReleaseName is the Helm release name to use. 
+ If omitted it will use the application name + type: string + skipCrds: + description: SkipCrds skips custom resource definition installation + step (Helm's --skip-crds) + type: boolean + valueFiles: + description: ValuesFiles is a list of Helm value files to + use when generating a template + items: + type: string + type: array + values: + description: Values specifies Helm values to be passed to + helm template, typically defined as a block + type: string + version: + description: Version is the Helm version to use for templating + ("3") + type: string + type: object + kustomize: + description: Kustomize holds kustomize specific options + properties: + commonAnnotations: + additionalProperties: + type: string + description: CommonAnnotations is a list of additional annotations + to add to rendered manifests + type: object + commonLabels: + additionalProperties: + type: string + description: CommonLabels is a list of additional labels to + add to rendered manifests + type: object + forceCommonAnnotations: + description: ForceCommonAnnotations specifies whether to force + applying common annotations to resources for Kustomize apps + type: boolean + forceCommonLabels: + description: ForceCommonLabels specifies whether to force + applying common labels to resources for Kustomize apps + type: boolean + images: + description: Images is a list of Kustomize image override + specifications + items: + description: KustomizeImage represents a Kustomize image + definition in the format [old_image_name=]: + type: string + type: array + namePrefix: + description: NamePrefix is a prefix appended to resources + for Kustomize apps + type: string + nameSuffix: + description: NameSuffix is a suffix appended to resources + for Kustomize apps + type: string + version: + description: Version controls which version of Kustomize to + use for rendering manifests + type: string + type: object + path: + description: Path is a directory path within the Git repository, + and is only valid 
for applications sourced from Git. + type: string + plugin: + description: Plugin holds config management plugin specific options + properties: + env: + description: Env is a list of environment variable entries + items: + description: EnvEntry represents an entry in the application's + environment + properties: + name: + description: Name is the name of the variable, usually + expressed in uppercase + type: string + value: + description: Value is the value of the variable + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + description: RepoURL is the URL to the repository (Git or Helm) + that contains the application manifests + type: string + targetRevision: + description: TargetRevision defines the revision of the source + to sync the application to. In case of Git, this can be commit, + tag, or branch. If omitted, will equal to HEAD. In case of Helm, + this is a semver tag for the Chart's version. + type: string + required: + - repoURL + type: object + syncPolicy: + description: SyncPolicy controls when and how a sync will be performed + properties: + automated: + description: Automated will keep an application synced to the + target revision + properties: + allowEmpty: + description: 'AllowEmpty allows apps have zero live resources + (default: false)' + type: boolean + prune: + description: 'Prune specifies whether to delete resources + from the cluster that are not found in the sources anymore + as part of automated sync (default: false)' + type: boolean + selfHeal: + description: 'SelfHeal specifes whether to revert resources + back to their desired state upon modification in the cluster + (default: false)' + type: boolean + type: object + retry: + description: Retry controls failed sync retry behavior + properties: + backoff: + description: Backoff controls how to backoff on subsequent + retries of failed syncs + properties: + duration: + description: Duration is the amount to back 
off. Default + unit is seconds, but could also be a duration (e.g. + "2m", "1h") + type: string + factor: + description: Factor is a factor to multiply the base duration + after each failed retry + format: int64 + type: integer + maxDuration: + description: MaxDuration is the maximum amount of time + allowed for the backoff strategy + type: string + type: object + limit: + description: Limit is the maximum number of attempts for retrying + a failed sync. If set to 0, no retries will be performed. + format: int64 + type: integer + type: object + syncOptions: + description: Options allow you to specify whole app sync-options + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + status: + description: ApplicationStatus contains status information for the application + properties: + conditions: + description: Conditions is a list of currently observed application + conditions + items: + description: ApplicationCondition contains details about an application + condition, which is usally an error or warning + properties: + lastTransitionTime: + description: LastTransitionTime is the time the condition was + last observed + format: date-time + type: string + message: + description: Message contains human-readable message indicating + details about condition + type: string + type: + description: Type is an application condition type + type: string + required: + - message + - type + type: object + type: array + health: + description: Health contains information about the application's current + health status + properties: + message: + description: Message is a human-readable informational message + describing the health status + type: string + status: + description: Status holds the status code of the application or + resource + type: string + type: object + history: + description: History contains information about the application's + sync history + items: + description: RevisionHistory contains history 
information about + a previous sync + properties: + deployStartedAt: + description: DeployStartedAt holds the time the sync operation + started + format: date-time + type: string + deployedAt: + description: DeployedAt holds the time the sync operation completed + format: date-time + type: string + id: + description: ID is an auto incrementing identifier of the RevisionHistory + format: int64 + type: integer + revision: + description: Revision holds the revision the sync was performed + against + type: string + source: + description: Source is a reference to the application source + used for the sync operation + properties: + chart: + description: Chart is a Helm chart name, and must be specified + for applications sourced from a Helm repo. + type: string + directory: + description: Directory holds path/directory specific options + properties: + exclude: + description: Exclude contains a glob pattern to match + paths against that should be explicitly excluded from + being used during manifest generation + type: string + include: + description: Include contains a glob pattern to match + paths against that should be explicitly included during + manifest generation + type: string + jsonnet: + description: Jsonnet holds options specific to Jsonnet + properties: + extVars: + description: ExtVars is a list of Jsonnet External + Variables + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + description: Additional library search dirs + items: + type: string + type: array + tlas: + description: TLAS is a list of Jsonnet Top-level + Arguments + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + 
type: object + type: array + type: object + recurse: + description: Recurse specifies whether to scan a directory + recursively for manifests + type: boolean + type: object + helm: + description: Helm holds helm specific options + properties: + fileParameters: + description: FileParameters are file parameters to the + helm template + items: + description: HelmFileParameter is a file parameter + that's passed to helm template during manifest generation + properties: + name: + description: Name is the name of the Helm parameter + type: string + path: + description: Path is the path to the file containing + the values for the Helm parameter + type: string + type: object + type: array + ignoreMissingValueFiles: + description: IgnoreMissingValueFiles prevents helm template + from failing when valueFiles do not exist locally + by not appending them to helm template --values + type: boolean + parameters: + description: Parameters is a list of Helm parameters + which are passed to the helm template command upon + manifest generation + items: + description: HelmParameter is a parameter that's passed + to helm template during manifest generation + properties: + forceString: + description: ForceString determines whether to + tell Helm to interpret booleans and numbers + as strings + type: boolean + name: + description: Name is the name of the Helm parameter + type: string + value: + description: Value is the value for the Helm parameter + type: string + type: object + type: array + passCredentials: + description: PassCredentials pass credentials to all + domains (Helm's --pass-credentials) + type: boolean + releaseName: + description: ReleaseName is the Helm release name to + use. 
If omitted it will use the application name + type: string + skipCrds: + description: SkipCrds skips custom resource definition + installation step (Helm's --skip-crds) + type: boolean + valueFiles: + description: ValuesFiles is a list of Helm value files + to use when generating a template + items: + type: string + type: array + values: + description: Values specifies Helm values to be passed + to helm template, typically defined as a block + type: string + version: + description: Version is the Helm version to use for + templating ("3") + type: string + type: object + kustomize: + description: Kustomize holds kustomize specific options + properties: + commonAnnotations: + additionalProperties: + type: string + description: CommonAnnotations is a list of additional + annotations to add to rendered manifests + type: object + commonLabels: + additionalProperties: + type: string + description: CommonLabels is a list of additional labels + to add to rendered manifests + type: object + forceCommonAnnotations: + description: ForceCommonAnnotations specifies whether + to force applying common annotations to resources + for Kustomize apps + type: boolean + forceCommonLabels: + description: ForceCommonLabels specifies whether to + force applying common labels to resources for Kustomize + apps + type: boolean + images: + description: Images is a list of Kustomize image override + specifications + items: + description: KustomizeImage represents a Kustomize + image definition in the format [old_image_name=]: + type: string + type: array + namePrefix: + description: NamePrefix is a prefix appended to resources + for Kustomize apps + type: string + nameSuffix: + description: NameSuffix is a suffix appended to resources + for Kustomize apps + type: string + version: + description: Version controls which version of Kustomize + to use for rendering manifests + type: string + type: object + path: + description: Path is a directory path within the Git repository, + and is only valid 
for applications sourced from Git. + type: string + plugin: + description: Plugin holds config management plugin specific + options + properties: + env: + description: Env is a list of environment variable entries + items: + description: EnvEntry represents an entry in the application's + environment + properties: + name: + description: Name is the name of the variable, + usually expressed in uppercase + type: string + value: + description: Value is the value of the variable + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + description: RepoURL is the URL to the repository (Git or + Helm) that contains the application manifests + type: string + targetRevision: + description: TargetRevision defines the revision of the + source to sync the application to. In case of Git, this + can be commit, tag, or branch. If omitted, will equal + to HEAD. In case of Helm, this is a semver tag for the + Chart's version. + type: string + required: + - repoURL + type: object + required: + - deployedAt + - id + - revision + type: object + type: array + observedAt: + description: 'ObservedAt indicates when the application state was + updated without querying latest git state Deprecated: controller + no longer updates ObservedAt field' + format: date-time + type: string + operationState: + description: OperationState contains information about any ongoing + operations, such as a sync + properties: + finishedAt: + description: FinishedAt contains time of operation completion + format: date-time + type: string + message: + description: Message holds any pertinent messages when attempting + to perform operation (typically errors). 
+ type: string + operation: + description: Operation is the original requested operation + properties: + info: + description: Info is a list of informational items for this + operation + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + initiatedBy: + description: InitiatedBy contains information about who initiated + the operations + properties: + automated: + description: Automated is set to true if operation was + initiated automatically by the application controller. + type: boolean + username: + description: Username contains the name of a user who + started operation + type: string + type: object + retry: + description: Retry controls the strategy to apply if a sync + fails + properties: + backoff: + description: Backoff controls how to backoff on subsequent + retries of failed syncs + properties: + duration: + description: Duration is the amount to back off. Default + unit is seconds, but could also be a duration (e.g. + "2m", "1h") + type: string + factor: + description: Factor is a factor to multiply the base + duration after each failed retry + format: int64 + type: integer + maxDuration: + description: MaxDuration is the maximum amount of + time allowed for the backoff strategy + type: string + type: object + limit: + description: Limit is the maximum number of attempts for + retrying a failed sync. If set to 0, no retries will + be performed. 
+ format: int64 + type: integer + type: object + sync: + description: Sync contains parameters for the operation + properties: + dryRun: + description: DryRun specifies to perform a `kubectl apply + --dry-run` without actually performing the sync + type: boolean + manifests: + description: Manifests is an optional field that overrides + sync source with a local directory for development + items: + type: string + type: array + prune: + description: Prune specifies to delete resources from + the cluster that are no longer tracked in git + type: boolean + resources: + description: Resources describes which resources shall + be part of the sync + items: + description: SyncOperationResource contains resources + to sync. + properties: + group: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + required: + - kind + - name + type: object + type: array + revision: + description: Revision is the revision (Git) or chart version + (Helm) which to sync the application to If omitted, + will use the revision specified in app spec. + type: string + source: + description: Source overrides the source definition set + in the application. This is typically set in a Rollback + operation and is nil during a Sync operation + properties: + chart: + description: Chart is a Helm chart name, and must + be specified for applications sourced from a Helm + repo. 
+ type: string + directory: + description: Directory holds path/directory specific + options + properties: + exclude: + description: Exclude contains a glob pattern to + match paths against that should be explicitly + excluded from being used during manifest generation + type: string + include: + description: Include contains a glob pattern to + match paths against that should be explicitly + included during manifest generation + type: string + jsonnet: + description: Jsonnet holds options specific to + Jsonnet + properties: + extVars: + description: ExtVars is a list of Jsonnet + External Variables + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest + generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + description: Additional library search dirs + items: + type: string + type: array + tlas: + description: TLAS is a list of Jsonnet Top-level + Arguments + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest + generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + description: Recurse specifies whether to scan + a directory recursively for manifests + type: boolean + type: object + helm: + description: Helm holds helm specific options + properties: + fileParameters: + description: FileParameters are file parameters + to the helm template + items: + description: HelmFileParameter is a file parameter + that's passed to helm template during manifest + generation + properties: + name: + description: Name is the name of the Helm + parameter + type: string + path: + description: Path is the path to the file + containing the values for the Helm parameter + type: string + type: object + type: array + ignoreMissingValueFiles: + description: 
IgnoreMissingValueFiles prevents + helm template from failing when valueFiles do + not exist locally by not appending them to helm + template --values + type: boolean + parameters: + description: Parameters is a list of Helm parameters + which are passed to the helm template command + upon manifest generation + items: + description: HelmParameter is a parameter that's + passed to helm template during manifest generation + properties: + forceString: + description: ForceString determines whether + to tell Helm to interpret booleans and + numbers as strings + type: boolean + name: + description: Name is the name of the Helm + parameter + type: string + value: + description: Value is the value for the + Helm parameter + type: string + type: object + type: array + passCredentials: + description: PassCredentials pass credentials + to all domains (Helm's --pass-credentials) + type: boolean + releaseName: + description: ReleaseName is the Helm release name + to use. If omitted it will use the application + name + type: string + skipCrds: + description: SkipCrds skips custom resource definition + installation step (Helm's --skip-crds) + type: boolean + valueFiles: + description: ValuesFiles is a list of Helm value + files to use when generating a template + items: + type: string + type: array + values: + description: Values specifies Helm values to be + passed to helm template, typically defined as + a block + type: string + version: + description: Version is the Helm version to use + for templating ("3") + type: string + type: object + kustomize: + description: Kustomize holds kustomize specific options + properties: + commonAnnotations: + additionalProperties: + type: string + description: CommonAnnotations is a list of additional + annotations to add to rendered manifests + type: object + commonLabels: + additionalProperties: + type: string + description: CommonLabels is a list of additional + labels to add to rendered manifests + type: object + forceCommonAnnotations: + 
description: ForceCommonAnnotations specifies + whether to force applying common annotations + to resources for Kustomize apps + type: boolean + forceCommonLabels: + description: ForceCommonLabels specifies whether + to force applying common labels to resources + for Kustomize apps + type: boolean + images: + description: Images is a list of Kustomize image + override specifications + items: + description: KustomizeImage represents a Kustomize + image definition in the format [old_image_name=]: + type: string + type: array + namePrefix: + description: NamePrefix is a prefix appended to + resources for Kustomize apps + type: string + nameSuffix: + description: NameSuffix is a suffix appended to + resources for Kustomize apps + type: string + version: + description: Version controls which version of + Kustomize to use for rendering manifests + type: string + type: object + path: + description: Path is a directory path within the Git + repository, and is only valid for applications sourced + from Git. + type: string + plugin: + description: Plugin holds config management plugin + specific options + properties: + env: + description: Env is a list of environment variable + entries + items: + description: EnvEntry represents an entry in + the application's environment + properties: + name: + description: Name is the name of the variable, + usually expressed in uppercase + type: string + value: + description: Value is the value of the variable + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + description: RepoURL is the URL to the repository + (Git or Helm) that contains the application manifests + type: string + targetRevision: + description: TargetRevision defines the revision of + the source to sync the application to. In case of + Git, this can be commit, tag, or branch. If omitted, + will equal to HEAD. In case of Helm, this is a semver + tag for the Chart's version. 
+ type: string + required: + - repoURL + type: object + syncOptions: + description: SyncOptions provide per-sync sync-options, + e.g. Validate=false + items: + type: string + type: array + syncStrategy: + description: SyncStrategy describes how to perform the + sync + properties: + apply: + description: Apply will perform a `kubectl apply` + to perform the sync. + properties: + force: + description: Force indicates whether or not to + supply the --force flag to `kubectl apply`. + The --force flag deletes and re-create the resource, + when PATCH encounters conflict and has retried + for 5 times. + type: boolean + type: object + hook: + description: Hook will submit any referenced resources + to perform the sync. This is the default strategy + properties: + force: + description: Force indicates whether or not to + supply the --force flag to `kubectl apply`. + The --force flag deletes and re-create the resource, + when PATCH encounters conflict and has retried + for 5 times. + type: boolean + type: object + type: object + type: object + type: object + phase: + description: Phase is the current phase of the operation + type: string + retryCount: + description: RetryCount contains time of operation retries + format: int64 + type: integer + startedAt: + description: StartedAt contains time of operation start + format: date-time + type: string + syncResult: + description: SyncResult is the result of a Sync operation + properties: + resources: + description: Resources contains a list of sync result items + for each individual resource in a sync operation + items: + description: ResourceResult holds the operation result details + of a specific resource + properties: + group: + description: Group specifies the API group of the resource + type: string + hookPhase: + description: HookPhase contains the state of any operation + associated with this resource OR hook This can also + contain values for non-hook resources. 
+ type: string + hookType: + description: HookType specifies the type of the hook. + Empty for non-hook resources + type: string + kind: + description: Kind specifies the API kind of the resource + type: string + message: + description: Message contains an informational or error + message for the last sync OR operation + type: string + name: + description: Name specifies the name of the resource + type: string + namespace: + description: Namespace specifies the target namespace + of the resource + type: string + status: + description: Status holds the final result of the sync. + Will be empty if the resources is yet to be applied/pruned + and is always zero-value for hooks + type: string + syncPhase: + description: SyncPhase indicates the particular phase + of the sync that this result was acquired in + type: string + version: + description: Version specifies the API version of the + resource + type: string + required: + - group + - kind + - name + - namespace + - version + type: object + type: array + revision: + description: Revision holds the revision this sync operation + was performed to + type: string + source: + description: Source records the application source information + of the sync, used for comparing auto-sync + properties: + chart: + description: Chart is a Helm chart name, and must be specified + for applications sourced from a Helm repo. 
+ type: string + directory: + description: Directory holds path/directory specific options + properties: + exclude: + description: Exclude contains a glob pattern to match + paths against that should be explicitly excluded + from being used during manifest generation + type: string + include: + description: Include contains a glob pattern to match + paths against that should be explicitly included + during manifest generation + type: string + jsonnet: + description: Jsonnet holds options specific to Jsonnet + properties: + extVars: + description: ExtVars is a list of Jsonnet External + Variables + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + description: Additional library search dirs + items: + type: string + type: array + tlas: + description: TLAS is a list of Jsonnet Top-level + Arguments + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + description: Recurse specifies whether to scan a directory + recursively for manifests + type: boolean + type: object + helm: + description: Helm holds helm specific options + properties: + fileParameters: + description: FileParameters are file parameters to + the helm template + items: + description: HelmFileParameter is a file parameter + that's passed to helm template during manifest + generation + properties: + name: + description: Name is the name of the Helm parameter + type: string + path: + description: Path is the path to the file containing + the values for the Helm parameter + type: string + type: object + type: array + ignoreMissingValueFiles: + description: 
IgnoreMissingValueFiles prevents helm + template from failing when valueFiles do not exist + locally by not appending them to helm template --values + type: boolean + parameters: + description: Parameters is a list of Helm parameters + which are passed to the helm template command upon + manifest generation + items: + description: HelmParameter is a parameter that's + passed to helm template during manifest generation + properties: + forceString: + description: ForceString determines whether + to tell Helm to interpret booleans and numbers + as strings + type: boolean + name: + description: Name is the name of the Helm parameter + type: string + value: + description: Value is the value for the Helm + parameter + type: string + type: object + type: array + passCredentials: + description: PassCredentials pass credentials to all + domains (Helm's --pass-credentials) + type: boolean + releaseName: + description: ReleaseName is the Helm release name + to use. If omitted it will use the application name + type: string + skipCrds: + description: SkipCrds skips custom resource definition + installation step (Helm's --skip-crds) + type: boolean + valueFiles: + description: ValuesFiles is a list of Helm value files + to use when generating a template + items: + type: string + type: array + values: + description: Values specifies Helm values to be passed + to helm template, typically defined as a block + type: string + version: + description: Version is the Helm version to use for + templating ("3") + type: string + type: object + kustomize: + description: Kustomize holds kustomize specific options + properties: + commonAnnotations: + additionalProperties: + type: string + description: CommonAnnotations is a list of additional + annotations to add to rendered manifests + type: object + commonLabels: + additionalProperties: + type: string + description: CommonLabels is a list of additional + labels to add to rendered manifests + type: object + forceCommonAnnotations: + 
description: ForceCommonAnnotations specifies whether + to force applying common annotations to resources + for Kustomize apps + type: boolean + forceCommonLabels: + description: ForceCommonLabels specifies whether to + force applying common labels to resources for Kustomize + apps + type: boolean + images: + description: Images is a list of Kustomize image override + specifications + items: + description: KustomizeImage represents a Kustomize + image definition in the format [old_image_name=]: + type: string + type: array + namePrefix: + description: NamePrefix is a prefix appended to resources + for Kustomize apps + type: string + nameSuffix: + description: NameSuffix is a suffix appended to resources + for Kustomize apps + type: string + version: + description: Version controls which version of Kustomize + to use for rendering manifests + type: string + type: object + path: + description: Path is a directory path within the Git repository, + and is only valid for applications sourced from Git. + type: string + plugin: + description: Plugin holds config management plugin specific + options + properties: + env: + description: Env is a list of environment variable + entries + items: + description: EnvEntry represents an entry in the + application's environment + properties: + name: + description: Name is the name of the variable, + usually expressed in uppercase + type: string + value: + description: Value is the value of the variable + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + description: RepoURL is the URL to the repository (Git + or Helm) that contains the application manifests + type: string + targetRevision: + description: TargetRevision defines the revision of the + source to sync the application to. In case of Git, this + can be commit, tag, or branch. If omitted, will equal + to HEAD. In case of Helm, this is a semver tag for the + Chart's version. 
+ type: string + required: + - repoURL + type: object + required: + - revision + type: object + required: + - operation + - phase + - startedAt + type: object + reconciledAt: + description: ReconciledAt indicates when the application state was + reconciled using the latest git version + format: date-time + type: string + resourceHealthSource: + description: 'ResourceHealthSource indicates where the resource health + status is stored: inline if not set or appTree' + type: string + resources: + description: Resources is a list of Kubernetes resources managed by + this application + items: + description: 'ResourceStatus holds the current sync and health status + of a resource TODO: describe members of this type' + properties: + group: + type: string + health: + description: HealthStatus contains information about the currently + observed health state of an application or resource + properties: + message: + description: Message is a human-readable informational message + describing the health status + type: string + status: + description: Status holds the status code of the application + or resource + type: string + type: object + hook: + type: boolean + kind: + type: string + name: + type: string + namespace: + type: string + requiresPruning: + type: boolean + status: + description: SyncStatusCode is a type which represents possible + comparison results + type: string + syncWave: + format: int64 + type: integer + version: + type: string + type: object + type: array + sourceType: + description: SourceType specifies the type of this application + type: string + summary: + description: Summary contains a list of URLs and container images + used by this application + properties: + externalURLs: + description: ExternalURLs holds all external URLs of application + child resources. + items: + type: string + type: array + images: + description: Images holds all images of application child resources. 
+ items: + type: string + type: array + type: object + sync: + description: Sync contains information about the application's current + sync status + properties: + comparedTo: + description: ComparedTo contains information about what has been + compared + properties: + destination: + description: Destination is a reference to the application's + destination used for comparison + properties: + name: + description: Name is an alternate way of specifying the + target cluster by its symbolic name + type: string + namespace: + description: Namespace specifies the target namespace + for the application's resources. The namespace will + only be set for namespace-scoped resources that have + not set a value for .metadata.namespace + type: string + server: + description: Server specifies the URL of the target cluster + and must be set to the Kubernetes control plane API + type: string + type: object + source: + description: Source is a reference to the application's source + used for comparison + properties: + chart: + description: Chart is a Helm chart name, and must be specified + for applications sourced from a Helm repo. 
+ type: string + directory: + description: Directory holds path/directory specific options + properties: + exclude: + description: Exclude contains a glob pattern to match + paths against that should be explicitly excluded + from being used during manifest generation + type: string + include: + description: Include contains a glob pattern to match + paths against that should be explicitly included + during manifest generation + type: string + jsonnet: + description: Jsonnet holds options specific to Jsonnet + properties: + extVars: + description: ExtVars is a list of Jsonnet External + Variables + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + description: Additional library search dirs + items: + type: string + type: array + tlas: + description: TLAS is a list of Jsonnet Top-level + Arguments + items: + description: JsonnetVar represents a variable + to be passed to jsonnet during manifest generation + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + description: Recurse specifies whether to scan a directory + recursively for manifests + type: boolean + type: object + helm: + description: Helm holds helm specific options + properties: + fileParameters: + description: FileParameters are file parameters to + the helm template + items: + description: HelmFileParameter is a file parameter + that's passed to helm template during manifest + generation + properties: + name: + description: Name is the name of the Helm parameter + type: string + path: + description: Path is the path to the file containing + the values for the Helm parameter + type: string + type: object + type: array + ignoreMissingValueFiles: + description: 
IgnoreMissingValueFiles prevents helm + template from failing when valueFiles do not exist + locally by not appending them to helm template --values + type: boolean + parameters: + description: Parameters is a list of Helm parameters + which are passed to the helm template command upon + manifest generation + items: + description: HelmParameter is a parameter that's + passed to helm template during manifest generation + properties: + forceString: + description: ForceString determines whether + to tell Helm to interpret booleans and numbers + as strings + type: boolean + name: + description: Name is the name of the Helm parameter + type: string + value: + description: Value is the value for the Helm + parameter + type: string + type: object + type: array + passCredentials: + description: PassCredentials pass credentials to all + domains (Helm's --pass-credentials) + type: boolean + releaseName: + description: ReleaseName is the Helm release name + to use. If omitted it will use the application name + type: string + skipCrds: + description: SkipCrds skips custom resource definition + installation step (Helm's --skip-crds) + type: boolean + valueFiles: + description: ValuesFiles is a list of Helm value files + to use when generating a template + items: + type: string + type: array + values: + description: Values specifies Helm values to be passed + to helm template, typically defined as a block + type: string + version: + description: Version is the Helm version to use for + templating ("3") + type: string + type: object + kustomize: + description: Kustomize holds kustomize specific options + properties: + commonAnnotations: + additionalProperties: + type: string + description: CommonAnnotations is a list of additional + annotations to add to rendered manifests + type: object + commonLabels: + additionalProperties: + type: string + description: CommonLabels is a list of additional + labels to add to rendered manifests + type: object + forceCommonAnnotations: + 
description: ForceCommonAnnotations specifies whether + to force applying common annotations to resources + for Kustomize apps + type: boolean + forceCommonLabels: + description: ForceCommonLabels specifies whether to + force applying common labels to resources for Kustomize + apps + type: boolean + images: + description: Images is a list of Kustomize image override + specifications + items: + description: KustomizeImage represents a Kustomize + image definition in the format [old_image_name=]: + type: string + type: array + namePrefix: + description: NamePrefix is a prefix appended to resources + for Kustomize apps + type: string + nameSuffix: + description: NameSuffix is a suffix appended to resources + for Kustomize apps + type: string + version: + description: Version controls which version of Kustomize + to use for rendering manifests + type: string + type: object + path: + description: Path is a directory path within the Git repository, + and is only valid for applications sourced from Git. + type: string + plugin: + description: Plugin holds config management plugin specific + options + properties: + env: + description: Env is a list of environment variable + entries + items: + description: EnvEntry represents an entry in the + application's environment + properties: + name: + description: Name is the name of the variable, + usually expressed in uppercase + type: string + value: + description: Value is the value of the variable + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + description: RepoURL is the URL to the repository (Git + or Helm) that contains the application manifests + type: string + targetRevision: + description: TargetRevision defines the revision of the + source to sync the application to. In case of Git, this + can be commit, tag, or branch. If omitted, will equal + to HEAD. In case of Helm, this is a semver tag for the + Chart's version. 
+ type: string + required: + - repoURL + type: object + required: + - destination + - source + type: object + revision: + description: Revision contains information about the revision + the comparison has been performed to + type: string + status: + description: Status is the sync state of the comparison + type: string + required: + - status + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: applicationsets.argoproj.io + app.kubernetes.io/part-of: argocd + name: applicationsets.argoproj.io +spec: + group: argoproj.io + names: + kind: ApplicationSet + listKind: ApplicationSetList + plural: applicationsets + shortNames: + - appset + - appsets + singular: applicationset + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + generators: + items: + properties: + clusterDecisionResource: + properties: + configMapRef: + type: string + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: 
string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + 
type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + values: + additionalProperties: + type: string + type: object + required: + - configMapRef + type: object + clusters: + properties: + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: 
string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + 
path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + values: + additionalProperties: + type: string + type: object + type: object + git: + properties: + directories: + items: + properties: + exclude: + type: boolean + path: + type: string + required: + - path + type: object + type: array + files: + items: + properties: + path: + type: string + required: + - path + type: object + type: array + repoURL: + type: string + requeueAfterSeconds: + format: int64 + type: integer + revision: + type: string + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + 
items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + 
required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - repoURL + - revision + type: object + list: + properties: + elements: + items: + x-kubernetes-preserve-unknown-fields: true + type: array + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + 
properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: 
string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - elements + type: object + matrix: + properties: + generators: + items: + properties: + clusterDecisionResource: + properties: + configMapRef: + type: string + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + 
properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: 
string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + values: + additionalProperties: + type: string + type: object + required: + - configMapRef + type: object + clusters: + properties: + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + 
code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: 
integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + values: + additionalProperties: + type: string + type: object + type: object + git: + properties: + directories: + items: + properties: + exclude: + type: boolean + path: + type: string + required: + - path + type: object + type: array + files: + items: + properties: + path: + type: string + required: + - path + type: object + type: array + repoURL: + type: string + requeueAfterSeconds: + format: int64 + type: integer + revision: + type: string + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + 
type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + 
- project + - source + type: object + required: + - metadata + - spec + type: object + required: + - repoURL + - revision + type: object + list: + properties: + elements: + items: + x-kubernetes-preserve-unknown-fields: true + type: array + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + 
ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - elements + type: object + matrix: + x-kubernetes-preserve-unknown-fields: true + merge: + x-kubernetes-preserve-unknown-fields: true + pullRequest: + properties: + bitbucketServer: + properties: + api: + type: string + basicAuth: + properties: + passwordRef: + properties: + key: + type: string + secretName: + 
type: string + required: + - key + - secretName + type: object + username: + type: string + required: + - passwordRef + - username + type: object + project: + type: string + repo: + type: string + required: + - api + - project + - repo + type: object + filters: + items: + properties: + branchMatch: + type: string + type: object + type: array + gitea: + properties: + api: + type: string + insecure: + type: boolean + owner: + type: string + repo: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - api + - owner + - repo + type: object + github: + properties: + api: + type: string + appSecretName: + type: string + labels: + items: + type: string + type: array + owner: + type: string + repo: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - owner + - repo + type: object + gitlab: + properties: + api: + type: string + labels: + items: + type: string + type: array + project: + type: string + pullRequestState: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - project + type: object + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + 
kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + 
name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + type: object + scmProvider: + properties: + azureDevOps: + properties: + accessTokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + allBranches: + type: boolean + api: + type: string + organization: + type: string + teamProject: + type: string + required: + - accessTokenRef + - organization + - teamProject + type: object + bitbucket: + properties: + allBranches: + type: boolean + appPasswordRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + owner: + type: string + user: + type: string + required: + - appPasswordRef + - owner + - user + type: object + bitbucketServer: + properties: + allBranches: + type: boolean + api: + type: string + basicAuth: + properties: + passwordRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + username: + type: string + required: + - passwordRef + - username + type: object + project: + type: string + required: + - api + - project + type: object + cloneProtocol: + type: string + filters: + items: + properties: + branchMatch: + type: string + 
labelMatch: + type: string + pathsDoNotExist: + items: + type: string + type: array + pathsExist: + items: + type: string + type: array + repositoryMatch: + type: string + type: object + type: array + gitea: + properties: + allBranches: + type: boolean + api: + type: string + insecure: + type: boolean + owner: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - api + - owner + type: object + github: + properties: + allBranches: + type: boolean + api: + type: string + appSecretName: + type: string + organization: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - organization + type: object + gitlab: + properties: + allBranches: + type: boolean + api: + type: string + group: + type: string + includeSubgroups: + type: boolean + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - group + type: object + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + 
properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + 
required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + type: object + type: array + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + 
properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: 
+ backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - generators + type: object + merge: + properties: + generators: + items: + properties: + clusterDecisionResource: + properties: + configMapRef: + type: string + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + 
properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: 
+ backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + values: + additionalProperties: + type: string + type: object + required: + - configMapRef + type: object + clusters: + properties: + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + 
properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + 
type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + values: + additionalProperties: + type: string + type: object + type: object + git: + properties: + directories: + items: + properties: + exclude: + type: boolean + path: + type: string + required: + - path + type: object + type: array + files: + items: + properties: + path: + type: string + required: + - path + type: object + type: array + repoURL: + type: string + requeueAfterSeconds: + format: int64 + type: integer + revision: + type: string + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + 
required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: 
array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - repoURL + - revision + type: object + list: + properties: + elements: + items: + x-kubernetes-preserve-unknown-fields: true + type: array + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: 
+ type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - elements + type: object + matrix: + x-kubernetes-preserve-unknown-fields: true + merge: + x-kubernetes-preserve-unknown-fields: true + pullRequest: + properties: + bitbucketServer: + properties: + api: + type: string + basicAuth: + properties: + passwordRef: + 
properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + username: + type: string + required: + - passwordRef + - username + type: object + project: + type: string + repo: + type: string + required: + - api + - project + - repo + type: object + filters: + items: + properties: + branchMatch: + type: string + type: object + type: array + gitea: + properties: + api: + type: string + insecure: + type: boolean + owner: + type: string + repo: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - api + - owner + - repo + type: object + github: + properties: + api: + type: string + appSecretName: + type: string + labels: + items: + type: string + type: array + owner: + type: string + repo: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - owner + - repo + type: object + gitlab: + properties: + api: + type: string + labels: + items: + type: string + type: array + project: + type: string + pullRequestState: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - project + type: object + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + 
jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + 
plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + type: object + scmProvider: + properties: + azureDevOps: + properties: + accessTokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + allBranches: + type: boolean + api: + type: string + organization: + type: string + teamProject: + type: string + required: + - accessTokenRef + - organization + - teamProject + type: object + bitbucket: + properties: + allBranches: + type: boolean + appPasswordRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + owner: + type: string + user: + type: string + required: + - appPasswordRef + - owner + - user + type: object + bitbucketServer: + properties: + allBranches: + type: boolean + api: + type: string + basicAuth: + properties: + passwordRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + username: + type: string + required: + - passwordRef + - username + type: object + project: + type: string + required: + - api + - project + type: object + cloneProtocol: + type: string + filters: + 
items: + properties: + branchMatch: + type: string + labelMatch: + type: string + pathsDoNotExist: + items: + type: string + type: array + pathsExist: + items: + type: string + type: array + repositoryMatch: + type: string + type: object + type: array + gitea: + properties: + allBranches: + type: boolean + api: + type: string + insecure: + type: boolean + owner: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - api + - owner + type: object + github: + properties: + allBranches: + type: boolean + api: + type: string + appSecretName: + type: string + organization: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - organization + type: object + gitlab: + properties: + allBranches: + type: boolean + api: + type: string + group: + type: string + includeSubgroups: + type: boolean + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - group + type: object + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - 
kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + 
repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + type: object + type: array + mergeKeys: + items: + type: string + type: array + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + 
type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: 
+ type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - generators + - mergeKeys + type: object + pullRequest: + properties: + bitbucketServer: + properties: + api: + type: string + basicAuth: + properties: + passwordRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + username: + type: string + required: + - passwordRef + - username + type: object + project: + type: string + repo: + type: string + required: + - api + - project + - repo + type: object + filters: + items: + properties: + branchMatch: + type: string + type: object + type: array + gitea: + properties: + api: + type: string + insecure: + type: boolean + owner: + type: string + repo: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - api + - owner + - repo + type: object + github: + properties: + api: + type: string + appSecretName: + type: string + labels: + items: + type: string + type: array + owner: + type: string + repo: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - owner + - repo + type: object + gitlab: + properties: + api: + type: string + labels: + items: + type: string + type: array + project: + type: string + pullRequestState: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + 
required: + - project + type: object + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: 
string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + type: object + scmProvider: + properties: + azureDevOps: + properties: + accessTokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + allBranches: + type: boolean + api: + type: string + organization: + type: string + teamProject: + type: string + required: + - accessTokenRef + - organization + - teamProject + type: object + bitbucket: + properties: + allBranches: + type: boolean + appPasswordRef: + 
properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + owner: + type: string + user: + type: string + required: + - appPasswordRef + - owner + - user + type: object + bitbucketServer: + properties: + allBranches: + type: boolean + api: + type: string + basicAuth: + properties: + passwordRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + username: + type: string + required: + - passwordRef + - username + type: object + project: + type: string + required: + - api + - project + type: object + cloneProtocol: + type: string + filters: + items: + properties: + branchMatch: + type: string + labelMatch: + type: string + pathsDoNotExist: + items: + type: string + type: array + pathsExist: + items: + type: string + type: array + repositoryMatch: + type: string + type: object + type: array + gitea: + properties: + allBranches: + type: boolean + api: + type: string + insecure: + type: boolean + owner: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - api + - owner + type: object + github: + properties: + allBranches: + type: boolean + api: + type: string + appSecretName: + type: string + organization: + type: string + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - organization + type: object + gitlab: + properties: + allBranches: + type: boolean + api: + type: string + group: + type: string + includeSubgroups: + type: boolean + tokenRef: + properties: + key: + type: string + secretName: + type: string + required: + - key + - secretName + type: object + required: + - group + type: object + requeueAfterSeconds: + format: int64 + type: integer + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + 
type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: 
string + version: + type: string + type: object + kustomize: + properties: + commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + type: object + type: array + goTemplate: + type: boolean + syncPolicy: + properties: + preserveResourcesOnDeletion: + type: boolean + type: object + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + 
additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + properties: + destination: + properties: + name: + type: string + namespace: + type: string + server: + type: string + type: object + ignoreDifferences: + items: + properties: + group: + type: string + jqPathExpressions: + items: + type: string + type: array + jsonPointers: + items: + type: string + type: array + kind: + type: string + managedFieldsManagers: + items: + type: string + type: array + name: + type: string + namespace: + type: string + required: + - kind + type: object + type: array + info: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + project: + type: string + revisionHistoryLimit: + format: int64 + type: integer + source: + properties: + chart: + type: string + directory: + properties: + exclude: + type: string + include: + type: string + jsonnet: + properties: + extVars: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + libs: + items: + type: string + type: array + tlas: + items: + properties: + code: + type: boolean + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + type: object + recurse: + type: boolean + type: object + helm: + properties: + fileParameters: + items: + properties: + name: + type: string + path: + type: string + type: object + type: array + ignoreMissingValueFiles: + type: boolean + parameters: + items: + properties: + forceString: + type: boolean + name: + type: string + value: + type: string + type: object + type: array + passCredentials: + type: boolean + releaseName: + type: string + skipCrds: + type: boolean + valueFiles: + items: + type: string + type: array + values: + type: string + version: + type: string + type: object + kustomize: + properties: + 
commonAnnotations: + additionalProperties: + type: string + type: object + commonLabels: + additionalProperties: + type: string + type: object + forceCommonAnnotations: + type: boolean + forceCommonLabels: + type: boolean + images: + items: + type: string + type: array + namePrefix: + type: string + nameSuffix: + type: string + version: + type: string + type: object + path: + type: string + plugin: + properties: + env: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + name: + type: string + type: object + repoURL: + type: string + targetRevision: + type: string + required: + - repoURL + type: object + syncPolicy: + properties: + automated: + properties: + allowEmpty: + type: boolean + prune: + type: boolean + selfHeal: + type: boolean + type: object + retry: + properties: + backoff: + properties: + duration: + type: string + factor: + format: int64 + type: integer + maxDuration: + type: string + type: object + limit: + format: int64 + type: integer + type: object + syncOptions: + items: + type: string + type: array + type: object + required: + - destination + - project + - source + type: object + required: + - metadata + - spec + type: object + required: + - generators + - template + type: object + status: + properties: + conditions: + items: + properties: + lastTransitionTime: + format: date-time + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + required: + - message + - reason + - status + - type + type: object + type: array + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: appprojects.argoproj.io + app.kubernetes.io/part-of: argocd + name: appprojects.argoproj.io +spec: + group: argoproj.io + names: + kind: AppProject + 
listKind: AppProjectList + plural: appprojects + shortNames: + - appproj + - appprojs + singular: appproject + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: 'AppProject provides a logical grouping of applications, providing + controls for: * where the apps may deploy to (cluster whitelist) * what + may be deployed (repository whitelist, resource whitelist/blacklist) * who + can access these applications (roles, OIDC group claims bindings) * and + what they can do (RBAC policies) * automation access to these roles (JWT + tokens)' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AppProjectSpec is the specification of an AppProject + properties: + clusterResourceBlacklist: + description: ClusterResourceBlacklist contains list of blacklisted + cluster level resources + items: + description: GroupKind specifies a Group and a Kind, but does not + force a version. 
This is useful for identifying concepts during + lookup stages without having partially valid types + properties: + group: + type: string + kind: + type: string + required: + - group + - kind + type: object + type: array + clusterResourceWhitelist: + description: ClusterResourceWhitelist contains list of whitelisted + cluster level resources + items: + description: GroupKind specifies a Group and a Kind, but does not + force a version. This is useful for identifying concepts during + lookup stages without having partially valid types + properties: + group: + type: string + kind: + type: string + required: + - group + - kind + type: object + type: array + description: + description: Description contains optional project description + type: string + destinations: + description: Destinations contains list of destinations available + for deployment + items: + description: ApplicationDestination holds information about the + application's destination + properties: + name: + description: Name is an alternate way of specifying the target + cluster by its symbolic name + type: string + namespace: + description: Namespace specifies the target namespace for the + application's resources. The namespace will only be set for + namespace-scoped resources that have not set a value for .metadata.namespace + type: string + server: + description: Server specifies the URL of the target cluster + and must be set to the Kubernetes control plane API + type: string + type: object + type: array + namespaceResourceBlacklist: + description: NamespaceResourceBlacklist contains list of blacklisted + namespace level resources + items: + description: GroupKind specifies a Group and a Kind, but does not + force a version. 
This is useful for identifying concepts during + lookup stages without having partially valid types + properties: + group: + type: string + kind: + type: string + required: + - group + - kind + type: object + type: array + namespaceResourceWhitelist: + description: NamespaceResourceWhitelist contains list of whitelisted + namespace level resources + items: + description: GroupKind specifies a Group and a Kind, but does not + force a version. This is useful for identifying concepts during + lookup stages without having partially valid types + properties: + group: + type: string + kind: + type: string + required: + - group + - kind + type: object + type: array + orphanedResources: + description: OrphanedResources specifies if controller should monitor + orphaned resources of apps in this project + properties: + ignore: + description: Ignore contains a list of resources that are to be + excluded from orphaned resources monitoring + items: + description: OrphanedResourceKey is a reference to a resource + to be ignored from + properties: + group: + type: string + kind: + type: string + name: + type: string + type: object + type: array + warn: + description: Warn indicates if warning condition should be created + for apps which have orphaned resources + type: boolean + type: object + permitOnlyProjectScopedClusters: + description: PermitOnlyProjectScopedClusters determines whether destinations + can only reference clusters which are project-scoped + type: boolean + roles: + description: Roles are user defined RBAC roles associated with this + project + items: + description: ProjectRole represents a role that has access to a + project + properties: + description: + description: Description is a description of the role + type: string + groups: + description: Groups are a list of OIDC group claims bound to + this role + items: + type: string + type: array + jwtTokens: + description: JWTTokens are a list of generated JWT tokens bound + to this role + items: + description: 
JWTToken holds the issuedAt and expiresAt values + of a token + properties: + exp: + format: int64 + type: integer + iat: + format: int64 + type: integer + id: + type: string + required: + - iat + type: object + type: array + name: + description: Name is a name for this role + type: string + policies: + description: Policies Stores a list of casbin formatted strings + that define access policies for the role in the project + items: + type: string + type: array + required: + - name + type: object + type: array + signatureKeys: + description: SignatureKeys contains a list of PGP key IDs that commits + in Git must be signed with in order to be allowed for sync + items: + description: SignatureKey is the specification of a key required + to verify commit signatures with + properties: + keyID: + description: The ID of the key in hexadecimal notation + type: string + required: + - keyID + type: object + type: array + sourceNamespaces: + description: SourceNamespaces defines the namespaces application resources + are allowed to be created in + items: + type: string + type: array + sourceRepos: + description: SourceRepos contains list of repository URLs which can + be used for deployment + items: + type: string + type: array + syncWindows: + description: SyncWindows controls when syncs can be run for apps in + this project + items: + description: SyncWindow contains the kind, time, duration and attributes + that are used to assign the syncWindows to apps + properties: + applications: + description: Applications contains a list of applications that + the window will apply to + items: + type: string + type: array + clusters: + description: Clusters contains a list of clusters that the window + will apply to + items: + type: string + type: array + duration: + description: Duration is the amount of time the sync window + will be open + type: string + kind: + description: Kind defines if the window allows or blocks syncs + type: string + manualSync: + description: ManualSync 
enables manual syncs when they would + otherwise be blocked + type: boolean + namespaces: + description: Namespaces contains a list of namespaces that the + window will apply to + items: + type: string + type: array + schedule: + description: Schedule is the time the window will begin, specified + in cron format + type: string + timeZone: + description: TimeZone of the sync that will be applied to the + schedule + type: string + type: object + type: array + type: object + status: + description: AppProjectStatus contains status information for AppProject + CRs + properties: + jwtTokensByRole: + additionalProperties: + description: JWTTokens represents a list of JWT tokens + properties: + items: + items: + description: JWTToken holds the issuedAt and expiresAt values + of a token + properties: + exp: + format: int64 + type: integer + iat: + format: int64 + type: integer + id: + type: string + required: + - iat + type: object + type: array + type: object + description: JWTTokensByRole contains a list of JWT tokens issued + for a given role + type: object + type: object + required: + - metadata + - spec + type: object + served: true + storage: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: application-controller + app.kubernetes.io/name: argocd-application-controller + app.kubernetes.io/part-of: argocd + name: argocd-application-controller +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: argocd-applicationset-controller + app.kubernetes.io/part-of: argocd-applicationset + name: argocd-applicationset-controller +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: dex-server + app.kubernetes.io/name: argocd-dex-server + app.kubernetes.io/part-of: argocd + name: argocd-dex-server +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: notifications-controller 
+ app.kubernetes.io/name: argocd-notifications-controller + app.kubernetes.io/part-of: argocd + name: argocd-notifications-controller +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: redis + app.kubernetes.io/name: argocd-redis + app.kubernetes.io/part-of: argocd + name: argocd-redis +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: repo-server + app.kubernetes.io/name: argocd-repo-server + app.kubernetes.io/part-of: argocd + name: argocd-repo-server +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server + app.kubernetes.io/part-of: argocd + name: argocd-server +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: application-controller + app.kubernetes.io/name: argocd-application-controller + app.kubernetes.io/part-of: argocd + name: argocd-application-controller +rules: +- apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - argoproj.io + resources: + - applications + - appprojects + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: argocd-applicationset-controller + app.kubernetes.io/part-of: argocd-applicationset + name: argocd-applicationset-controller +rules: +- apiGroups: + - argoproj.io + resources: + - applications + - applicationsets + - applicationsets/finalizers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - argoproj.io + resources: + - appprojects + verbs: + - get +- apiGroups: + - argoproj.io + resources: + - applicationsets/status + verbs: + - get + - patch + - update 
+- apiGroups: + - "" + resources: + - events + verbs: + - create + - get + - list + - patch + - watch +- apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - get + - list + - watch +- apiGroups: + - apps + - extensions + resources: + - deployments + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: dex-server + app.kubernetes.io/name: argocd-dex-server + app.kubernetes.io/part-of: argocd + name: argocd-dex-server +rules: +- apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argocd-notifications-controller +rules: +- apiGroups: + - argoproj.io + resources: + - applications + - appprojects + verbs: + - get + - list + - watch + - update + - patch +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - argocd-notifications-cm + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resourceNames: + - argocd-notifications-secret + resources: + - secrets + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server + app.kubernetes.io/part-of: argocd + name: argocd-server +rules: +- apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - argoproj.io + resources: + - applications + - appprojects + - applicationsets + verbs: + - create + - get + - list + - watch + - update + - delete + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - list +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: application-controller + app.kubernetes.io/name: 
argocd-application-controller + app.kubernetes.io/part-of: argocd + name: argocd-application-controller +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +- nonResourceURLs: + - '*' + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server + app.kubernetes.io/part-of: argocd + name: argocd-server +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - delete + - get + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - list +- apiGroups: + - "" + resources: + - pods + - pods/log + verbs: + - get +- apiGroups: + - argoproj.io + resources: + - applications + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: application-controller + app.kubernetes.io/name: argocd-application-controller + app.kubernetes.io/part-of: argocd + name: argocd-application-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argocd-application-controller +subjects: +- kind: ServiceAccount + name: argocd-application-controller +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: argocd-applicationset-controller + app.kubernetes.io/part-of: argocd-applicationset + name: argocd-applicationset-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argocd-applicationset-controller +subjects: +- kind: ServiceAccount + name: argocd-applicationset-controller +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: dex-server + app.kubernetes.io/name: argocd-dex-server + app.kubernetes.io/part-of: argocd + name: argocd-dex-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argocd-dex-server +subjects: +- 
kind: ServiceAccount + name: argocd-dex-server +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argocd-notifications-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argocd-notifications-controller +subjects: +- kind: ServiceAccount + name: argocd-notifications-controller +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: redis + app.kubernetes.io/name: argocd-redis + app.kubernetes.io/part-of: argocd + name: argocd-redis +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argocd-redis +subjects: +- kind: ServiceAccount + name: argocd-redis +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server + app.kubernetes.io/part-of: argocd + name: argocd-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argocd-server +subjects: +- kind: ServiceAccount + name: argocd-server +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: application-controller + app.kubernetes.io/name: argocd-application-controller + app.kubernetes.io/part-of: argocd + name: argocd-application-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argocd-application-controller +subjects: +- kind: ServiceAccount + name: argocd-application-controller + namespace: argocd +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server + app.kubernetes.io/part-of: argocd + name: argocd-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argocd-server +subjects: +- kind: ServiceAccount + name: argocd-server + namespace: argocd +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + 
app.kubernetes.io/name: argocd-cm + app.kubernetes.io/part-of: argocd + name: argocd-cm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/name: argocd-cmd-params-cm + app.kubernetes.io/part-of: argocd + name: argocd-cmd-params-cm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/name: argocd-gpg-keys-cm + app.kubernetes.io/part-of: argocd + name: argocd-gpg-keys-cm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: argocd-notifications-cm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/name: argocd-rbac-cm + app.kubernetes.io/part-of: argocd + name: argocd-rbac-cm +--- +apiVersion: v1 +data: + ssh_known_hosts: |- + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== + github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== + gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= + gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf + gitlab.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 + ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/name: argocd-ssh-known-hosts-cm + app.kubernetes.io/part-of: argocd + name: argocd-ssh-known-hosts-cm +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/name: argocd-tls-certs-cm + app.kubernetes.io/part-of: argocd + name: argocd-tls-certs-cm +--- +apiVersion: v1 +kind: Secret +metadata: + name: argocd-notifications-secret +type: Opaque +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/name: argocd-secret + app.kubernetes.io/part-of: 
argocd + name: argocd-secret +type: Opaque +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: argocd-applicationset-controller + app.kubernetes.io/part-of: argocd-applicationset + name: argocd-applicationset-controller +spec: + ports: + - name: webhook + port: 7000 + protocol: TCP + targetPort: webhook + - name: metrics + port: 8080 + protocol: TCP + targetPort: metrics + selector: + app.kubernetes.io/name: argocd-applicationset-controller +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: dex-server + app.kubernetes.io/name: argocd-dex-server + app.kubernetes.io/part-of: argocd + name: argocd-dex-server +spec: + ports: + - name: http + port: 5556 + protocol: TCP + targetPort: 5556 + - name: grpc + port: 5557 + protocol: TCP + targetPort: 5557 + - name: metrics + port: 5558 + protocol: TCP + targetPort: 5558 + selector: + app.kubernetes.io/name: argocd-dex-server +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: metrics + app.kubernetes.io/name: argocd-metrics + app.kubernetes.io/part-of: argocd + name: argocd-metrics +spec: + ports: + - name: metrics + port: 8082 + protocol: TCP + targetPort: 8082 + selector: + app.kubernetes.io/name: argocd-application-controller +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: argocd-notifications-controller-metrics + name: argocd-notifications-controller-metrics +spec: + ports: + - name: metrics + port: 9001 + protocol: TCP + targetPort: 9001 + selector: + app.kubernetes.io/name: argocd-notifications-controller +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: redis + app.kubernetes.io/name: argocd-redis + app.kubernetes.io/part-of: argocd + name: argocd-redis +spec: + ports: + - name: tcp-redis + port: 6379 + targetPort: 6379 + selector: + app.kubernetes.io/name: argocd-redis +--- +apiVersion: v1 +kind: 
Service +metadata: + labels: + app.kubernetes.io/component: repo-server + app.kubernetes.io/name: argocd-repo-server + app.kubernetes.io/part-of: argocd + name: argocd-repo-server +spec: + ports: + - name: server + port: 8081 + protocol: TCP + targetPort: 8081 + - name: metrics + port: 8084 + protocol: TCP + targetPort: 8084 + selector: + app.kubernetes.io/name: argocd-repo-server +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server + app.kubernetes.io/part-of: argocd + name: argocd-server +spec: + ports: + - name: http + port: 80 + protocol: TCP + targetPort: 8080 + - name: https + port: 443 + protocol: TCP + targetPort: 8080 + selector: + app.kubernetes.io/name: argocd-server +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server-metrics + app.kubernetes.io/part-of: argocd + name: argocd-server-metrics +spec: + ports: + - name: metrics + port: 8083 + protocol: TCP + targetPort: 8083 + selector: + app.kubernetes.io/name: argocd-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/name: argocd-applicationset-controller + app.kubernetes.io/part-of: argocd-applicationset + name: argocd-applicationset-controller +spec: + selector: + matchLabels: + app.kubernetes.io/name: argocd-applicationset-controller + template: + metadata: + labels: + app.kubernetes.io/name: argocd-applicationset-controller + spec: + containers: + - command: + - entrypoint.sh + - argocd-applicationset-controller + env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: quay.io/argoproj/argocd:v2.5.3 + imagePullPolicy: Always + name: argocd-applicationset-controller + ports: + - containerPort: 7000 + name: webhook + - containerPort: 8080 + name: metrics + securityContext: + allowPrivilegeEscalation: false + capabilities: + 
drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /app/config/ssh + name: ssh-known-hosts + - mountPath: /app/config/tls + name: tls-certs + - mountPath: /app/config/gpg/source + name: gpg-keys + - mountPath: /app/config/gpg/keys + name: gpg-keyring + - mountPath: /tmp + name: tmp + serviceAccountName: argocd-applicationset-controller + volumes: + - configMap: + name: argocd-ssh-known-hosts-cm + name: ssh-known-hosts + - configMap: + name: argocd-tls-certs-cm + name: tls-certs + - configMap: + name: argocd-gpg-keys-cm + name: gpg-keys + - emptyDir: {} + name: gpg-keyring + - emptyDir: {} + name: tmp +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: dex-server + app.kubernetes.io/name: argocd-dex-server + app.kubernetes.io/part-of: argocd + name: argocd-dex-server +spec: + selector: + matchLabels: + app.kubernetes.io/name: argocd-dex-server + template: + metadata: + labels: + app.kubernetes.io/name: argocd-dex-server + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/part-of: argocd + topologyKey: kubernetes.io/hostname + weight: 5 + containers: + - command: + - /shared/argocd-dex + - rundex + env: + - name: ARGOCD_DEX_SERVER_DISABLE_TLS + valueFrom: + configMapKeyRef: + key: dexserver.disable.tls + name: argocd-cmd-params-cm + optional: true + image: ghcr.io/dexidp/dex:v2.35.3 + imagePullPolicy: Always + name: dex + ports: + - containerPort: 5556 + - containerPort: 5557 + - containerPort: 5558 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /shared + name: static-files + - mountPath: /tmp + name: dexconfig + - mountPath: /tls + name: argocd-dex-server-tls + 
initContainers: + - command: + - cp + - -n + - /usr/local/bin/argocd + - /shared/argocd-dex + image: quay.io/argoproj/argocd:v2.5.3 + imagePullPolicy: Always + name: copyutil + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /shared + name: static-files + - mountPath: /tmp + name: dexconfig + serviceAccountName: argocd-dex-server + volumes: + - emptyDir: {} + name: static-files + - emptyDir: {} + name: dexconfig + - name: argocd-dex-server-tls + secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + optional: true + secretName: argocd-dex-server-tls +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argocd-notifications-controller +spec: + selector: + matchLabels: + app.kubernetes.io/name: argocd-notifications-controller + strategy: + type: Recreate + template: + metadata: + labels: + app.kubernetes.io/name: argocd-notifications-controller + spec: + containers: + - command: + - argocd-notifications + image: quay.io/argoproj/argocd:v2.5.3 + imagePullPolicy: Always + livenessProbe: + tcpSocket: + port: 9001 + name: argocd-notifications-controller + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /app/config/tls + name: tls-certs + - mountPath: /app/config/reposerver/tls + name: argocd-repo-server-tls + workingDir: /app + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: argocd-notifications-controller + volumes: + - configMap: + name: argocd-tls-certs-cm + name: tls-certs + - name: argocd-repo-server-tls + secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + optional: true + secretName: argocd-repo-server-tls +--- +apiVersion: 
apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: redis + app.kubernetes.io/name: argocd-redis + app.kubernetes.io/part-of: argocd + name: argocd-redis +spec: + selector: + matchLabels: + app.kubernetes.io/name: argocd-redis + template: + metadata: + labels: + app.kubernetes.io/name: argocd-redis + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: argocd-redis + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/part-of: argocd + topologyKey: kubernetes.io/hostname + weight: 5 + containers: + - args: + - --save + - "" + - --appendonly + - "no" + image: redis:7.0.5-alpine + imagePullPolicy: Always + name: redis + ports: + - containerPort: 6379 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + securityContext: + runAsNonRoot: true + runAsUser: 999 + seccompProfile: + type: RuntimeDefault + serviceAccountName: argocd-redis +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: repo-server + app.kubernetes.io/name: argocd-repo-server + app.kubernetes.io/part-of: argocd + name: argocd-repo-server +spec: + selector: + matchLabels: + app.kubernetes.io/name: argocd-repo-server + template: + metadata: + labels: + app.kubernetes.io/name: argocd-repo-server + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: argocd-repo-server + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/part-of: argocd + topologyKey: kubernetes.io/hostname + weight: 5 + automountServiceAccountToken: false + containers: + - command: + - sh + - -c + - entrypoint.sh argocd-repo-server --redis argocd-redis:6379 + env: + - name: 
ARGOCD_RECONCILIATION_TIMEOUT + valueFrom: + configMapKeyRef: + key: timeout.reconciliation + name: argocd-cm + optional: true + - name: ARGOCD_REPO_SERVER_LOGFORMAT + valueFrom: + configMapKeyRef: + key: reposerver.log.format + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_LOGLEVEL + valueFrom: + configMapKeyRef: + key: reposerver.log.level + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_PARALLELISM_LIMIT + valueFrom: + configMapKeyRef: + key: reposerver.parallelism.limit + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_DISABLE_TLS + valueFrom: + configMapKeyRef: + key: reposerver.disable.tls + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_TLS_MIN_VERSION + valueFrom: + configMapKeyRef: + key: reposerver.tls.minversion + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_TLS_MAX_VERSION + valueFrom: + configMapKeyRef: + key: reposerver.tls.maxversion + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_TLS_CIPHERS + valueFrom: + configMapKeyRef: + key: reposerver.tls.ciphers + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: reposerver.repo.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: REDIS_SERVER + valueFrom: + configMapKeyRef: + key: redis.server + name: argocd-cmd-params-cm + optional: true + - name: REDIS_COMPRESSION + valueFrom: + configMapKeyRef: + key: redis.compression + name: argocd-cmd-params-cm + optional: true + - name: REDISDB + valueFrom: + configMapKeyRef: + key: redis.db + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_DEFAULT_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: reposerver.default.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_OTLP_ADDRESS + valueFrom: + configMapKeyRef: + key: otlp.address + name: argocd-cmd-params-cm + optional: true + - name: 
ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE + valueFrom: + configMapKeyRef: + key: reposerver.max.combined.directory.manifests.size + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_PLUGIN_TAR_EXCLUSIONS + valueFrom: + configMapKeyRef: + key: reposerver.plugin.tar.exclusions + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_ALLOW_OUT_OF_BOUNDS_SYMLINKS + valueFrom: + configMapKeyRef: + key: reposerver.allow.oob.symlinks + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_TAR_SIZE + valueFrom: + configMapKeyRef: + key: reposerver.streamed.manifest.max.tar.size + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_EXTRACTED_SIZE + valueFrom: + configMapKeyRef: + key: reposerver.streamed.manifest.max.extracted.size + name: argocd-cmd-params-cm + optional: true + - name: HELM_CACHE_HOME + value: /helm-working-dir + - name: HELM_CONFIG_HOME + value: /helm-working-dir + - name: HELM_DATA_HOME + value: /helm-working-dir + image: quay.io/argoproj/argocd:v2.5.3 + imagePullPolicy: Always + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz?full=true + port: 8084 + initialDelaySeconds: 30 + periodSeconds: 5 + name: argocd-repo-server + ports: + - containerPort: 8081 + - containerPort: 8084 + readinessProbe: + httpGet: + path: /healthz + port: 8084 + initialDelaySeconds: 5 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /app/config/ssh + name: ssh-known-hosts + - mountPath: /app/config/tls + name: tls-certs + - mountPath: /app/config/gpg/source + name: gpg-keys + - mountPath: /app/config/gpg/keys + name: gpg-keyring + - mountPath: /app/config/reposerver/tls + name: argocd-repo-server-tls + - mountPath: /tmp + name: tmp + - mountPath: 
/helm-working-dir + name: helm-working-dir + - mountPath: /home/argocd/cmp-server/plugins + name: plugins + initContainers: + - command: + - cp + - -n + - /usr/local/bin/argocd + - /var/run/argocd/argocd-cmp-server + image: quay.io/argoproj/argocd:v2.5.3 + name: copyutil + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /var/run/argocd + name: var-files + serviceAccountName: argocd-repo-server + volumes: + - configMap: + name: argocd-ssh-known-hosts-cm + name: ssh-known-hosts + - configMap: + name: argocd-tls-certs-cm + name: tls-certs + - configMap: + name: argocd-gpg-keys-cm + name: gpg-keys + - emptyDir: {} + name: gpg-keyring + - emptyDir: {} + name: tmp + - emptyDir: {} + name: helm-working-dir + - name: argocd-repo-server-tls + secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + optional: true + secretName: argocd-repo-server-tls + - emptyDir: {} + name: var-files + - emptyDir: {} + name: plugins +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: server + app.kubernetes.io/name: argocd-server + app.kubernetes.io/part-of: argocd + name: argocd-server +spec: + selector: + matchLabels: + app.kubernetes.io/name: argocd-server + template: + metadata: + labels: + app.kubernetes.io/name: argocd-server + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: argocd-server + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/part-of: argocd + topologyKey: kubernetes.io/hostname + weight: 5 + containers: + - command: + - argocd-server + - --insecure + env: + - name: ARGOCD_SERVER_INSECURE + valueFrom: + configMapKeyRef: + 
key: server.insecure + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_BASEHREF + value: "/argocd/" + - name: ARGOCD_SERVER_ROOTPATH + valueFrom: + configMapKeyRef: + key: server.rootpath + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_LOGFORMAT + valueFrom: + configMapKeyRef: + key: server.log.format + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_LOG_LEVEL + valueFrom: + configMapKeyRef: + key: server.log.level + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_REPO_SERVER + valueFrom: + configMapKeyRef: + key: repo.server + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_DEX_SERVER + valueFrom: + configMapKeyRef: + key: server.dex.server + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_DISABLE_AUTH + valueFrom: + configMapKeyRef: + key: server.disable.auth + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_ENABLE_GZIP + valueFrom: + configMapKeyRef: + key: server.enable.gzip + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS + valueFrom: + configMapKeyRef: + key: server.repo.server.timeout.seconds + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_X_FRAME_OPTIONS + valueFrom: + configMapKeyRef: + key: server.x.frame.options + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_CONTENT_SECURITY_POLICY + valueFrom: + configMapKeyRef: + key: server.content.security.policy + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_REPO_SERVER_PLAINTEXT + valueFrom: + configMapKeyRef: + key: server.repo.server.plaintext + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_REPO_SERVER_STRICT_TLS + valueFrom: + configMapKeyRef: + key: server.repo.server.strict.tls + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_DEX_SERVER_PLAINTEXT + valueFrom: + configMapKeyRef: + key: server.dex.server.plaintext + 
name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_DEX_SERVER_STRICT_TLS + valueFrom: + configMapKeyRef: + key: server.dex.server.strict.tls + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_TLS_MIN_VERSION + valueFrom: + configMapKeyRef: + key: server.tls.minversion + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_TLS_MAX_VERSION + valueFrom: + configMapKeyRef: + key: server.tls.maxversion + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_TLS_CIPHERS + valueFrom: + configMapKeyRef: + key: server.tls.ciphers + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_CONNECTION_STATUS_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: server.connection.status.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_OIDC_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: server.oidc.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_LOGIN_ATTEMPTS_EXPIRATION + valueFrom: + configMapKeyRef: + key: server.login.attempts.expiration + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_STATIC_ASSETS + valueFrom: + configMapKeyRef: + key: server.staticassets + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APP_STATE_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: server.app.state.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: REDIS_SERVER + valueFrom: + configMapKeyRef: + key: redis.server + name: argocd-cmd-params-cm + optional: true + - name: REDIS_COMPRESSION + valueFrom: + configMapKeyRef: + key: redis.compression + name: argocd-cmd-params-cm + optional: true + - name: REDISDB + valueFrom: + configMapKeyRef: + key: redis.db + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_DEFAULT_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: server.default.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_MAX_COOKIE_NUMBER + 
valueFrom: + configMapKeyRef: + key: server.http.cookie.maxnumber + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_SERVER_OTLP_ADDRESS + valueFrom: + configMapKeyRef: + key: otlp.address + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_NAMESPACES + valueFrom: + configMapKeyRef: + key: application.namespaces + name: argocd-cmd-params-cm + optional: true + image: quay.io/argoproj/argocd:v2.5.3 + imagePullPolicy: Always + livenessProbe: + httpGet: + path: /healthz?full=true + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 30 + name: argocd-server + ports: + - containerPort: 8080 + - containerPort: 8083 + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 3 + periodSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /app/config/ssh + name: ssh-known-hosts + - mountPath: /app/config/tls + name: tls-certs + - mountPath: /app/config/server/tls + name: argocd-repo-server-tls + - mountPath: /app/config/dex/tls + name: argocd-dex-server-tls + - mountPath: /home/argocd + name: plugins-home + - mountPath: /tmp + name: tmp + serviceAccountName: argocd-server + volumes: + - emptyDir: {} + name: plugins-home + - emptyDir: {} + name: tmp + - configMap: + name: argocd-ssh-known-hosts-cm + name: ssh-known-hosts + - configMap: + name: argocd-tls-certs-cm + name: tls-certs + - name: argocd-repo-server-tls + secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + optional: true + secretName: argocd-repo-server-tls + - name: argocd-dex-server-tls + secret: + items: + - key: tls.crt + path: tls.crt + - key: ca.crt + path: ca.crt + optional: true + secretName: argocd-dex-server-tls +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app.kubernetes.io/component: 
application-controller + app.kubernetes.io/name: argocd-application-controller + app.kubernetes.io/part-of: argocd + name: argocd-application-controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: argocd-application-controller + serviceName: argocd-application-controller + template: + metadata: + labels: + app.kubernetes.io/name: argocd-application-controller + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/name: argocd-application-controller + topologyKey: kubernetes.io/hostname + weight: 100 + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/part-of: argocd + topologyKey: kubernetes.io/hostname + weight: 5 + containers: + - command: + - argocd-application-controller + env: + - name: ARGOCD_CONTROLLER_REPLICAS + value: "1" + - name: ARGOCD_RECONCILIATION_TIMEOUT + valueFrom: + configMapKeyRef: + key: timeout.reconciliation + name: argocd-cm + optional: true + - name: ARGOCD_HARD_RECONCILIATION_TIMEOUT + valueFrom: + configMapKeyRef: + key: timeout.hard.reconciliation + name: argocd-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER + valueFrom: + configMapKeyRef: + key: repo.server + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS + valueFrom: + configMapKeyRef: + key: controller.repo.server.timeout.seconds + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS + valueFrom: + configMapKeyRef: + key: controller.status.processors + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_OPERATION_PROCESSORS + valueFrom: + configMapKeyRef: + key: controller.operation.processors + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_LOGFORMAT + valueFrom: + configMapKeyRef: + key: controller.log.format + name: 
argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_LOGLEVEL + valueFrom: + configMapKeyRef: + key: controller.log.level + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: controller.metrics.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS + valueFrom: + configMapKeyRef: + key: controller.self.heal.timeout.seconds + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT + valueFrom: + configMapKeyRef: + key: controller.repo.server.plaintext + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS + valueFrom: + configMapKeyRef: + key: controller.repo.server.strict.tls + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH + valueFrom: + configMapKeyRef: + key: controller.resource.health.persist + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APP_STATE_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: controller.app.state.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: REDIS_SERVER + valueFrom: + configMapKeyRef: + key: redis.server + name: argocd-cmd-params-cm + optional: true + - name: REDIS_COMPRESSION + valueFrom: + configMapKeyRef: + key: redis.compression + name: argocd-cmd-params-cm + optional: true + - name: REDISDB + valueFrom: + configMapKeyRef: + key: redis.db + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_DEFAULT_CACHE_EXPIRATION + valueFrom: + configMapKeyRef: + key: controller.default.cache.expiration + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS + valueFrom: + configMapKeyRef: + key: otlp.address + name: argocd-cmd-params-cm + optional: true + - name: ARGOCD_APPLICATION_NAMESPACES + 
valueFrom: + configMapKeyRef: + key: application.namespaces + name: argocd-cmd-params-cm + optional: true + image: quay.io/argoproj/argocd:v2.5.3 + imagePullPolicy: Always + name: argocd-application-controller + ports: + - containerPort: 8082 + readinessProbe: + httpGet: + path: /healthz + port: 8082 + initialDelaySeconds: 5 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /app/config/controller/tls + name: argocd-repo-server-tls + - mountPath: /home/argocd + name: argocd-home + workingDir: /home/argocd + serviceAccountName: argocd-application-controller + volumes: + - emptyDir: {} + name: argocd-home + - name: argocd-repo-server-tls + secret: + items: + - key: tls.crt + path: tls.crt + - key: tls.key + path: tls.key + - key: ca.crt + path: ca.crt + optional: true + secretName: argocd-repo-server-tls +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: argocd-application-controller-network-policy +spec: + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 8082 + podSelector: + matchLabels: + app.kubernetes.io/name: argocd-application-controller + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: argocd-applicationset-controller-network-policy +spec: + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 7000 + protocol: TCP + - port: 8080 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: argocd-applicationset-controller + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: argocd-dex-server-network-policy +spec: + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: argocd-server + ports: + - port: 5556 + protocol: TCP + - port: 5557 + protocol: TCP + - from: + - namespaceSelector: {} + 
ports: + - port: 5558 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: argocd-dex-server + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: argocd-notifications-controller-network-policy +spec: + ingress: + - from: + - namespaceSelector: {} + ports: + - port: 9001 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: argocd-notifications-controller + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: argocd-redis-network-policy +spec: + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: argocd-server + - podSelector: + matchLabels: + app.kubernetes.io/name: argocd-repo-server + - podSelector: + matchLabels: + app.kubernetes.io/name: argocd-application-controller + ports: + - port: 6379 + protocol: TCP + podSelector: + matchLabels: + app.kubernetes.io/name: argocd-redis + policyTypes: + - Ingress + - Egress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: argocd-repo-server-network-policy +spec: + ingress: + - from: + - podSelector: + matchLabels: + app.kubernetes.io/name: argocd-server + - podSelector: + matchLabels: + app.kubernetes.io/name: argocd-application-controller + - podSelector: + matchLabels: + app.kubernetes.io/name: argocd-notifications-controller + ports: + - port: 8081 + protocol: TCP + - from: + - namespaceSelector: {} + ports: + - port: 8084 + podSelector: + matchLabels: + app.kubernetes.io/name: argocd-repo-server + policyTypes: + - Ingress +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: argocd-server-network-policy +spec: + ingress: + - {} + podSelector: + matchLabels: + app.kubernetes.io/name: argocd-server + policyTypes: + - Ingress \ No newline at end of file diff --git a/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf 
b/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf new file mode 100644 index 000000000..cceefd3eb --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf @@ -0,0 +1,20 @@ + location /argocd { + error_page 403 @errorworkspace; + set $authz_resource "/argocd"; + set $authz_method "access"; + set $authz_service "argocd"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + set $proxy_service "argocd"; + # $upstream is written to the logs + set $upstream http://argocd-server.argocd.svc.cluster.local; + + rewrite ^/argocd/(.*) /$1 break; + + proxy_set_header Connection ''; + proxy_http_version 1.1; + chunked_transfer_encoding off; + + proxy_pass $upstream; + } \ No newline at end of file From fb505319b972c984cd94cafbd924e944c0b8508f Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 9 Dec 2022 07:00:27 -0600 Subject: [PATCH 027/362] chore(spot-daemonset-whitelist): whitelisted public ecr domain for spot instance daemonset image (#2098) Co-authored-by: Edward Malinowski --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 9955eff9c..c1238bea6 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -119,6 +119,7 @@ opportunityinsights.org orcid.org pgp.mit.edu ppa.launchpad.net +public.ecr.aws pubmirrors.dal.corespace.com reflector.westga.edu registry.npmjs.org From 9695faa71168d0479619403713b2587a97f6d585 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 9 Dec 2022 07:13:43 -0600 Subject: [PATCH 028/362] feat(spot-instances): Added some config for spot instance scheduling (#2095) * feat(spot-instances): Added some config for spot instance scheduling * feat(spot-instances): Added some config for spot instance scheduling * feat(spot-instances): Added some config for spot instance scheduling * feat(spot-instances): Added some config for 
spot instance scheduling * feat(spot-instances): Added some config for spot instance scheduling * feat(spot-instances): Added some config for spot instance scheduling Co-authored-by: Edward Malinowski --- .secrets.baseline | 4 ++-- Jenkinsfile | 9 +++++++++ .../access-backend/access-backend-deploy.yaml | 11 ++++++++++- .../ambassador-gen3/ambassador-gen3-deploy.yaml | 11 ++++++++++- kube/services/ambassador/ambassador-deploy.yaml | 11 ++++++++++- kube/services/ambtest/ambtest-deploy.yaml | 11 ++++++++++- kube/services/arborist/arborist-deploy-2.yaml | 11 ++++++++++- kube/services/arborist/arborist-deploy.yaml | 11 ++++++++++- kube/services/argo-wrapper/argo-wrapper-deploy.yaml | 11 ++++++++++- .../arranger-dashboard/arranger-dashboard-deploy.yaml | 11 ++++++++++- kube/services/arranger/arranger-deploy.yaml | 11 ++++++++++- kube/services/audit-service/audit-service-deploy.yaml | 11 ++++++++++- kube/services/auspice/auspice-deploy.yaml | 11 ++++++++++- kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml | 10 ++++++++++ kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml | 11 ++++++++++- kube/services/cogwheel/cogwheel-deploy.yaml | 10 ++++++++++ .../cohort-middleware/cohort-middleware-deploy.yaml | 11 ++++++++++- kube/services/dashboard/dashboard-deploy.yaml | 11 ++++++++++- kube/services/datasim/datasim-deploy.yaml | 11 ++++++++++- kube/services/dicom-server/dicom-server-deploy.yaml | 10 ++++++++++ kube/services/dicom-viewer/dicom-viewer-deploy.yaml | 10 ++++++++++ kube/services/fence/fence-canary-deploy.yaml | 11 ++++++++++- kube/services/fence/fence-deploy.yaml | 11 ++++++++++- kube/services/fenceshib/fenceshib-canary-deploy.yaml | 11 ++++++++++- kube/services/fenceshib/fenceshib-deploy.yaml | 11 ++++++++++- kube/services/fluentd/fluentd.yaml | 4 ++++ .../frontend-framework/frontend-framework-deploy.yaml | 11 ++++++++++- .../frontend-framework-root-deploy.yaml | 11 ++++++++++- kube/services/gdcapi/gdcapi-deploy.yaml | 10 ++++++++++ 
.../google-sa-validation-deploy.yaml | 10 ++++++++++ kube/services/guppy/guppy-deploy.yaml | 11 ++++++++++- kube/services/hatchery/hatchery-deploy.yaml | 11 ++++++++++- kube/services/indexd/indexd-canary-deploy.yaml | 11 ++++++++++- kube/services/indexd/indexd-deploy.yaml | 11 ++++++++++- kube/services/influxdb/influxdb-deployment.yaml | 10 ++++++++++ .../jenkins-ci-worker/jenkins-ci-worker-deploy.yaml | 9 +++++++++ .../jenkins-worker/jenkins-worker-deploy.yaml | 9 +++++++++ kube/services/jenkins/jenkins-deploy.yaml | 9 +++++++++ .../jenkins2-worker/jenkins2-worker-deploy.yaml | 9 +++++++++ kube/services/jenkins2/jenkins2-deploy.yaml | 9 +++++++++ .../jobs/arborist-rm-expired-access-cronjob.yaml | 10 ++++++++++ .../services/jobs/arborist-rm-expired-access-job.yaml | 10 ++++++++++ kube/services/jobs/arboristdb-create-job.yaml | 10 ++++++++++ kube/services/jobs/aws-bucket-replicate-job.yaml | 10 ++++++++++ kube/services/jobs/bucket-manifest-job.yaml | 10 ++++++++++ kube/services/jobs/bucket-replicate-job.yaml | 10 ++++++++++ kube/services/jobs/bucket-replication-job.yaml | 10 ++++++++++ kube/services/jobs/bucket-size-report-job.yaml | 10 ++++++++++ kube/services/jobs/cedar-ingestion-job.yaml | 10 ++++++++++ kube/services/jobs/client-modify-job.yaml | 10 ++++++++++ kube/services/jobs/cogwheel-register-client-job.yaml | 10 ++++++++++ kube/services/jobs/config-fence-job.yaml | 10 ++++++++++ kube/services/jobs/covid19-bayes-cronjob.yaml | 10 ++++++++++ kube/services/jobs/covid19-bayes-job.yaml | 10 ++++++++++ kube/services/jobs/covid19-etl-job.yaml | 10 ++++++++++ kube/services/jobs/covid19-notebook-etl-job.yaml | 10 ++++++++++ kube/services/jobs/data-ingestion-job.yaml | 10 ++++++++++ kube/services/jobs/distribute-licenses-job.yaml | 10 ++++++++++ kube/services/jobs/envtest-job.yaml | 10 ++++++++++ kube/services/jobs/es-garbage-job.yaml | 10 ++++++++++ kube/services/jobs/etl-cronjob.yaml | 10 ++++++++++ kube/services/jobs/etl-job.yaml | 10 ++++++++++ 
.../fence-cleanup-expired-ga4gh-info-cronjob.yaml | 10 ++++++++++ .../jobs/fence-cleanup-expired-ga4gh-info-job.yaml | 10 ++++++++++ kube/services/jobs/fence-db-migrate-job.yaml | 10 ++++++++++ .../jobs/fence-delete-expired-clients-job.yaml | 10 ++++++++++ kube/services/jobs/fence-visa-update-cronjob.yaml | 10 ++++++++++ kube/services/jobs/fence-visa-update-job.yaml | 10 ++++++++++ kube/services/jobs/fencedb-create-job.yaml | 10 ++++++++++ kube/services/jobs/fluentd-restart-job.yaml | 10 ++++++++++ kube/services/jobs/gdcdb-create-job.yaml | 10 ++++++++++ .../services/jobs/gen3qa-check-bucket-access-job.yaml | 10 ++++++++++ kube/services/jobs/gentestdata-job.yaml | 10 ++++++++++ kube/services/jobs/gitops-sync-job.yaml | 10 ++++++++++ kube/services/jobs/google-bucket-manifest-job.yaml | 10 ++++++++++ kube/services/jobs/google-bucket-replicate-job.yaml | 10 ++++++++++ kube/services/jobs/google-create-bucket-job.yaml | 10 ++++++++++ .../jobs/google-delete-expired-access-cronjob.yaml | 10 ++++++++++ .../jobs/google-delete-expired-access-job.yaml | 10 ++++++++++ ...google-delete-expired-service-account-cronjob.yaml | 10 ++++++++++ .../google-delete-expired-service-account-job.yaml | 10 ++++++++++ .../jobs/google-init-proxy-groups-cronjob.yaml | 10 ++++++++++ kube/services/jobs/google-init-proxy-groups-job.yaml | 10 ++++++++++ .../jobs/google-manage-account-access-cronjob.yaml | 10 ++++++++++ .../jobs/google-manage-account-access-job.yaml | 10 ++++++++++ kube/services/jobs/google-manage-keys-cronjob.yaml | 10 ++++++++++ kube/services/jobs/google-manage-keys-job.yaml | 10 ++++++++++ .../google-verify-bucket-access-group-cronjob.yaml | 10 ++++++++++ .../jobs/google-verify-bucket-access-group-job.yaml | 10 ++++++++++ kube/services/jobs/graph-create-job.yaml | 10 ++++++++++ kube/services/jobs/hatchery-metrics-job.yaml | 10 ++++++++++ kube/services/jobs/hatchery-reaper-job.yaml | 10 ++++++++++ kube/services/jobs/healthcheck-cronjob.yaml | 10 ++++++++++ 
kube/services/jobs/indexd-authz-job.yaml | 10 ++++++++++ kube/services/jobs/indexd-userdb-job.yaml | 10 ++++++++++ kube/services/jobs/metadata-aggregate-sync-job.yaml | 10 ++++++++++ kube/services/jobs/opencost-report-argo-job.yaml | 10 ++++++++++ kube/services/jobs/psql-fix-job.yaml | 10 ++++++++++ .../services/jobs/remove-objects-from-clouds-job.yaml | 10 ++++++++++ kube/services/jobs/replicate-validation-job.yaml | 10 ++++++++++ kube/services/jobs/s3sync-cronjob.yaml | 10 ++++++++++ kube/services/jobs/usersync-job.yaml | 10 ++++++++++ kube/services/jobs/useryaml-job.yaml | 10 ++++++++++ kube/services/jupyterhub/jupyterhub-deploy.yaml | 10 ++++++++++ .../kayako-wrapper/kayako-wrapper-deploy.yaml | 11 ++++++++++- .../manifestservice/manifestservice-deploy.yaml | 11 ++++++++++- kube/services/mariner/mariner-deploy.yaml | 11 ++++++++++- kube/services/metadata/metadata-deploy.yaml | 11 ++++++++++- kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml | 11 ++++++++++- kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 11 ++++++++++- kube/services/peregrine/peregrine-canary-deploy.yaml | 11 ++++++++++- kube/services/peregrine/peregrine-deploy.yaml | 11 ++++++++++- kube/services/pidgin/pidgin-deploy.yaml | 11 ++++++++++- kube/services/portal/portal-deploy.yaml | 11 ++++++++++- kube/services/portal/portal-root-deploy.yaml | 11 ++++++++++- .../presigned-url-fence-deploy.yaml | 11 ++++++++++- .../qa-dashboard/qa-dashboard-deployment.yaml | 10 ++++++++++ kube/services/qabot/qabot-deploy.yaml | 10 ++++++++++ kube/services/requestor/requestor-deploy.yaml | 11 ++++++++++- kube/services/revproxy/revproxy-deploy.yaml | 11 ++++++++++- kube/services/selenium/selenium-hub-deployment.yaml | 10 ++++++++++ .../selenium/selenium-node-chrome-deployment.yaml | 10 ++++++++++ kube/services/sftp/sftp-deploy.yaml | 10 ++++++++++ kube/services/sheepdog/sheepdog-canary-deploy.yaml | 11 ++++++++++- kube/services/sheepdog/sheepdog-deploy.yaml | 11 ++++++++++- kube/services/shiny/shiny-deploy.yaml 
| 10 ++++++++++ kube/services/sower/sower-deploy.yaml | 11 ++++++++++- kube/services/spark/spark-deploy.yaml | 11 ++++++++++- kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml | 11 ++++++++++- .../statsd-exporter/statsd-exporter-deploy.yaml | 10 ++++++++++ kube/services/status-api/status-api-deploy.yaml | 10 ++++++++++ kube/services/superset/superset-deploy.yaml | 10 ++++++++++ kube/services/thor/thor-deploy.yaml | 11 ++++++++++- kube/services/tty/tty-deploy.yaml | 11 ++++++++++- kube/services/tube/tube-deploy.yaml | 11 ++++++++++- kube/services/ws-storage/ws-storage-deploy.yaml | 11 ++++++++++- kube/services/wts/wts-deploy.yaml | 11 ++++++++++- 137 files changed, 1350 insertions(+), 51 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 8d7d9afb8..c0439527e 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-11-17T21:04:51Z", + "generated_at": "2022-12-06T22:21:29Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -1143,7 +1143,7 @@ "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", "is_secret": false, "is_verified": false, - "line_number": 70, + "line_number": 80, "type": "Basic Auth Credentials" } ], diff --git a/Jenkinsfile b/Jenkinsfile index 365f1ca24..c2d149681 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -45,6 +45,15 @@ metadata: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND containers: - name: shell image: quay.io/cdis/gen3-ci-worker:master diff --git a/kube/services/access-backend/access-backend-deploy.yaml b/kube/services/access-backend/access-backend-deploy.yaml index 661dd1fe7..e12a954f7 100644 --- a/kube/services/access-backend/access-backend-deploy.yaml +++ 
b/kube/services/access-backend/access-backend-deploy.yaml @@ -28,7 +28,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -37,6 +37,15 @@ spec: values: - access-backend topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml b/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml index 1390be121..2a5ce95ff 100644 --- a/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml +++ b/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml @@ -22,12 +22,21 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchLabels: service: ambassador topologyKey: kubernetes.io/hostname + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT serviceAccountName: ambassador containers: - name: ambassador diff --git a/kube/services/ambassador/ambassador-deploy.yaml b/kube/services/ambassador/ambassador-deploy.yaml index 3ddea4303..cd3f52cf1 100644 --- a/kube/services/ambassador/ambassador-deploy.yaml +++ b/kube/services/ambassador/ambassador-deploy.yaml @@ -28,12 +28,21 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchLabels: service: ambassador topologyKey: kubernetes.io/hostname + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: 
eks.amazonaws.com/capacityType + operator: In + values: + - SPOT serviceAccountName: ambassador containers: - name: ambassador diff --git a/kube/services/ambtest/ambtest-deploy.yaml b/kube/services/ambtest/ambtest-deploy.yaml index b7ba2ac45..00247f7ea 100644 --- a/kube/services/ambtest/ambtest-deploy.yaml +++ b/kube/services/ambtest/ambtest-deploy.yaml @@ -26,7 +26,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -35,6 +35,15 @@ spec: values: - ambtest topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: ambtest-conf diff --git a/kube/services/arborist/arborist-deploy-2.yaml b/kube/services/arborist/arborist-deploy-2.yaml index 514a7ef96..8949b60bb 100644 --- a/kube/services/arborist/arborist-deploy-2.yaml +++ b/kube/services/arborist/arborist-deploy-2.yaml @@ -27,7 +27,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -36,6 +36,15 @@ spec: values: - arborist topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: arborist-secret diff --git a/kube/services/arborist/arborist-deploy.yaml b/kube/services/arborist/arborist-deploy.yaml index 08ea04a59..11a7dcda9 100644 --- a/kube/services/arborist/arborist-deploy.yaml +++ b/kube/services/arborist/arborist-deploy.yaml @@ -28,7 +28,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - 
weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -37,6 +37,15 @@ spec: values: - arborist topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: # ----------------------------------------------------------------------------- diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index cbd734739..9c661e348 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -28,7 +28,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -37,6 +37,15 @@ spec: values: - argo-wrapper topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: argo-config configMap: diff --git a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml index ae9a26a43..c0dae7370 100644 --- a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml +++ b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml @@ -22,7 +22,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -31,6 +31,15 @@ spec: values: - arranger topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - 
SPOT automountServiceAccountToken: false containers: - name: arranger-dashboard diff --git a/kube/services/arranger/arranger-deploy.yaml b/kube/services/arranger/arranger-deploy.yaml index 57e19ae29..7620ce536 100644 --- a/kube/services/arranger/arranger-deploy.yaml +++ b/kube/services/arranger/arranger-deploy.yaml @@ -23,7 +23,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -32,6 +32,15 @@ spec: values: - arranger topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: arranger-config diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml index 78e7d6df1..97fe415c8 100644 --- a/kube/services/audit-service/audit-service-deploy.yaml +++ b/kube/services/audit-service/audit-service-deploy.yaml @@ -32,7 +32,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -41,6 +41,15 @@ spec: values: - audit-service topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/auspice/auspice-deploy.yaml b/kube/services/auspice/auspice-deploy.yaml index 88324fec4..6e4b371b7 100644 --- a/kube/services/auspice/auspice-deploy.yaml +++ b/kube/services/auspice/auspice-deploy.yaml @@ -23,7 +23,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - 
weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -32,6 +32,15 @@ spec: values: - auspice topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false containers: - name: auspice diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml index a2a3170d5..4534c480e 100644 --- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml +++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml @@ -22,6 +22,16 @@ spec: netvpc: "yes" GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: credentials diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml index 954bc5f06..8f4b88311 100644 --- a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml +++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml @@ -24,7 +24,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -33,6 +33,15 @@ spec: values: - cedar-wrapper topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/cogwheel/cogwheel-deploy.yaml b/kube/services/cogwheel/cogwheel-deploy.yaml index ef274220a..0e857015f 100644 --- a/kube/services/cogwheel/cogwheel-deploy.yaml +++ 
b/kube/services/cogwheel/cogwheel-deploy.yaml @@ -12,6 +12,16 @@ spec: app: cogwheel GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: cogwheel-g3auto secret: diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index e301856e5..96ff1b73b 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -30,7 +30,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -39,6 +39,15 @@ spec: values: - cohort-middleware topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: cohort-middleware-config diff --git a/kube/services/dashboard/dashboard-deploy.yaml b/kube/services/dashboard/dashboard-deploy.yaml index 14a3379cc..e03766304 100644 --- a/kube/services/dashboard/dashboard-deploy.yaml +++ b/kube/services/dashboard/dashboard-deploy.yaml @@ -29,7 +29,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -38,6 +38,15 @@ spec: values: - dashboard topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git 
a/kube/services/datasim/datasim-deploy.yaml b/kube/services/datasim/datasim-deploy.yaml index c48075b89..a0e33149f 100644 --- a/kube/services/datasim/datasim-deploy.yaml +++ b/kube/services/datasim/datasim-deploy.yaml @@ -22,7 +22,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -31,6 +31,15 @@ spec: values: - datasim topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index b2ef0834e..854cda23b 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -17,6 +17,16 @@ spec: public: "yes" GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT volumes: - name: config-volume-g3auto secret: diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml index d1fb8ce55..ed2af0fec 100644 --- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml @@ -17,6 +17,16 @@ spec: public: "yes" GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT containers: - name: dicom-viewer GEN3_DICOM-VIEWER_IMAGE diff --git a/kube/services/fence/fence-canary-deploy.yaml 
b/kube/services/fence/fence-canary-deploy.yaml index 12e5a8ee8..7c41c1f0f 100644 --- a/kube/services/fence/fence-canary-deploy.yaml +++ b/kube/services/fence/fence-canary-deploy.yaml @@ -29,7 +29,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -38,6 +38,15 @@ spec: values: - fence topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 95d2b5496..72b2b9572 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -35,7 +35,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -44,6 +44,15 @@ spec: values: - fence topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: # ----------------------------------------------------------------------------- diff --git a/kube/services/fenceshib/fenceshib-canary-deploy.yaml b/kube/services/fenceshib/fenceshib-canary-deploy.yaml index 152edefec..74085009f 100644 --- a/kube/services/fenceshib/fenceshib-canary-deploy.yaml +++ b/kube/services/fenceshib/fenceshib-canary-deploy.yaml @@ -30,7 +30,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -39,6 +39,15 @@ spec: values: - fence 
topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/fenceshib/fenceshib-deploy.yaml b/kube/services/fenceshib/fenceshib-deploy.yaml index 528726262..469d7eb55 100644 --- a/kube/services/fenceshib/fenceshib-deploy.yaml +++ b/kube/services/fenceshib/fenceshib-deploy.yaml @@ -30,7 +30,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -39,6 +39,15 @@ spec: values: - fenceshib topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/fluentd/fluentd.yaml b/kube/services/fluentd/fluentd.yaml index f6526ea56..5c25ddfaa 100644 --- a/kube/services/fluentd/fluentd.yaml +++ b/kube/services/fluentd/fluentd.yaml @@ -27,6 +27,10 @@ spec: operator: "Equal" value: "jupyter" effect: "NoSchedule" + - key: "role" + operator: "Equal" + value: "workflow" + effect: "NoSchedule" containers: - name: fluentd GEN3_FLUENTD_IMAGE diff --git a/kube/services/frontend-framework/frontend-framework-deploy.yaml b/kube/services/frontend-framework/frontend-framework-deploy.yaml index 843002844..743d4736c 100644 --- a/kube/services/frontend-framework/frontend-framework-deploy.yaml +++ b/kube/services/frontend-framework/frontend-framework-deploy.yaml @@ -22,7 +22,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -31,6 +31,15 @@ spec: 
values: - frontend-framework topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml index df66b97ad..f5766555c 100644 --- a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml +++ b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml @@ -22,7 +22,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -31,6 +31,15 @@ spec: values: - frontend-framework topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/gdcapi/gdcapi-deploy.yaml b/kube/services/gdcapi/gdcapi-deploy.yaml index cd397cab8..261b48994 100644 --- a/kube/services/gdcapi/gdcapi-deploy.yaml +++ b/kube/services/gdcapi/gdcapi-deploy.yaml @@ -14,6 +14,16 @@ spec: labels: app: gdcapi spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml index 880ce5fb3..aa120b2e8 100644 --- a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml +++ 
b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml @@ -20,6 +20,16 @@ spec: dbfence: "yes" GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 16486d3a9..666be88a1 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -27,7 +27,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -36,6 +36,15 @@ spec: values: - guppy topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: guppy-config diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml index f67100098..3bb4ec0a8 100644 --- a/kube/services/hatchery/hatchery-deploy.yaml +++ b/kube/services/hatchery/hatchery-deploy.yaml @@ -28,7 +28,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -37,6 +37,15 @@ spec: values: - hatchery topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: hatchery-service-account securityContext: fsGroup: 1001 diff --git a/kube/services/indexd/indexd-canary-deploy.yaml 
b/kube/services/indexd/indexd-canary-deploy.yaml index 92c329f26..4ae860da0 100644 --- a/kube/services/indexd/indexd-canary-deploy.yaml +++ b/kube/services/indexd/indexd-canary-deploy.yaml @@ -27,7 +27,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -36,6 +36,15 @@ spec: values: - indexd topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml index c9961fba7..afce6a3b5 100644 --- a/kube/services/indexd/indexd-deploy.yaml +++ b/kube/services/indexd/indexd-deploy.yaml @@ -31,7 +31,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -40,6 +40,15 @@ spec: values: - indexd topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/influxdb/influxdb-deployment.yaml b/kube/services/influxdb/influxdb-deployment.yaml index 72d4b57d7..36bdbe576 100644 --- a/kube/services/influxdb/influxdb-deployment.yaml +++ b/kube/services/influxdb/influxdb-deployment.yaml @@ -15,6 +15,16 @@ spec: labels: app: influxdb spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND containers: - image: 
docker.io/influxdb:1.8.0 imagePullPolicy: IfNotPresent diff --git a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml index f7b874111..630fc5837 100644 --- a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml +++ b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml @@ -16,6 +16,15 @@ spec: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml index 4e13eea69..7cb169649 100644 --- a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml +++ b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml @@ -16,6 +16,15 @@ spec: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml index 2c6afb76d..5e2f8c154 100644 --- a/kube/services/jenkins/jenkins-deploy.yaml +++ b/kube/services/jenkins/jenkins-deploy.yaml @@ -24,6 +24,15 @@ spec: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git 
a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml index ad29eb47e..c85efcff3 100644 --- a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml +++ b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml @@ -16,6 +16,15 @@ spec: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml index 673686d17..a3c5b2f88 100644 --- a/kube/services/jenkins2/jenkins2-deploy.yaml +++ b/kube/services/jenkins2/jenkins2-deploy.yaml @@ -24,6 +24,15 @@ spec: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml index 328894689..f99bd4d1c 100644 --- a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml +++ b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml @@ -14,6 +14,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: arborist-secret secret: diff --git a/kube/services/jobs/arborist-rm-expired-access-job.yaml b/kube/services/jobs/arborist-rm-expired-access-job.yaml index 
34833dded..bc9625ccc 100644 --- a/kube/services/jobs/arborist-rm-expired-access-job.yaml +++ b/kube/services/jobs/arborist-rm-expired-access-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false dnsConfig: options: diff --git a/kube/services/jobs/arboristdb-create-job.yaml b/kube/services/jobs/arboristdb-create-job.yaml index 74d7bebe4..d96af6613 100644 --- a/kube/services/jobs/arboristdb-create-job.yaml +++ b/kube/services/jobs/arboristdb-create-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: arborist-secret diff --git a/kube/services/jobs/aws-bucket-replicate-job.yaml b/kube/services/jobs/aws-bucket-replicate-job.yaml index d9f0f08ad..4bc15a294 100644 --- a/kube/services/jobs/aws-bucket-replicate-job.yaml +++ b/kube/services/jobs/aws-bucket-replicate-job.yaml @@ -10,6 +10,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/bucket-manifest-job.yaml b/kube/services/jobs/bucket-manifest-job.yaml index 98506331e..24f42b76f 100644 --- a/kube/services/jobs/bucket-manifest-job.yaml +++ b/kube/services/jobs/bucket-manifest-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + 
matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: sa-#SA_NAME_PLACEHOLDER# volumes: - name: cred-volume diff --git a/kube/services/jobs/bucket-replicate-job.yaml b/kube/services/jobs/bucket-replicate-job.yaml index fbaf15816..46a8be51f 100644 --- a/kube/services/jobs/bucket-replicate-job.yaml +++ b/kube/services/jobs/bucket-replicate-job.yaml @@ -17,6 +17,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: batch-operations-account securityContext: fsGroup: 1000 diff --git a/kube/services/jobs/bucket-replication-job.yaml b/kube/services/jobs/bucket-replication-job.yaml index 4ef56367e..86a569c94 100644 --- a/kube/services/jobs/bucket-replication-job.yaml +++ b/kube/services/jobs/bucket-replication-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: sa-#SA_NAME_PLACEHOLDER# volumes: - name: cred-volume diff --git a/kube/services/jobs/bucket-size-report-job.yaml b/kube/services/jobs/bucket-size-report-job.yaml index 253d010e4..9a9d0f958 100644 --- a/kube/services/jobs/bucket-size-report-job.yaml +++ b/kube/services/jobs/bucket-size-report-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never securityContext: fsGroup: 1000 diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml index 
37f537c53..87b284bf0 100644 --- a/kube/services/jobs/cedar-ingestion-job.yaml +++ b/kube/services/jobs/cedar-ingestion-job.yaml @@ -24,6 +24,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/client-modify-job.yaml b/kube/services/jobs/client-modify-job.yaml index 995fdd483..4e86709f0 100644 --- a/kube/services/jobs/client-modify-job.yaml +++ b/kube/services/jobs/client-modify-job.yaml @@ -11,6 +11,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: yaml-merge configMap: diff --git a/kube/services/jobs/cogwheel-register-client-job.yaml b/kube/services/jobs/cogwheel-register-client-job.yaml index 03461619b..81c6ff487 100644 --- a/kube/services/jobs/cogwheel-register-client-job.yaml +++ b/kube/services/jobs/cogwheel-register-client-job.yaml @@ -17,6 +17,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: cogwheel-g3auto secret: diff --git a/kube/services/jobs/config-fence-job.yaml b/kube/services/jobs/config-fence-job.yaml index 7fd655937..62ec47053 100644 --- a/kube/services/jobs/config-fence-job.yaml +++ b/kube/services/jobs/config-fence-job.yaml @@ -18,6 +18,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + 
operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: shared-data diff --git a/kube/services/jobs/covid19-bayes-cronjob.yaml b/kube/services/jobs/covid19-bayes-cronjob.yaml index 951668b0c..53d92b9ca 100644 --- a/kube/services/jobs/covid19-bayes-cronjob.yaml +++ b/kube/services/jobs/covid19-bayes-cronjob.yaml @@ -16,6 +16,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 restartPolicy: Never nodeSelector: diff --git a/kube/services/jobs/covid19-bayes-job.yaml b/kube/services/jobs/covid19-bayes-job.yaml index a47ed9fc5..36853a8d6 100644 --- a/kube/services/jobs/covid19-bayes-job.yaml +++ b/kube/services/jobs/covid19-bayes-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 restartPolicy: Never containers: diff --git a/kube/services/jobs/covid19-etl-job.yaml b/kube/services/jobs/covid19-etl-job.yaml index d94c24808..84ab52a4e 100644 --- a/kube/services/jobs/covid19-etl-job.yaml +++ b/kube/services/jobs/covid19-etl-job.yaml @@ -10,6 +10,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 volumes: - name: cred-volume diff --git a/kube/services/jobs/covid19-notebook-etl-job.yaml b/kube/services/jobs/covid19-notebook-etl-job.yaml index 3d22b0240..e5045036b 100644 --- 
a/kube/services/jobs/covid19-notebook-etl-job.yaml +++ b/kube/services/jobs/covid19-notebook-etl-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 volumes: - name: cred-volume diff --git a/kube/services/jobs/data-ingestion-job.yaml b/kube/services/jobs/data-ingestion-job.yaml index 9530d0c8c..940e1ff08 100644 --- a/kube/services/jobs/data-ingestion-job.yaml +++ b/kube/services/jobs/data-ingestion-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never volumes: - name: shared-data diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml index 8418f08e7..02a5b08ed 100644 --- a/kube/services/jobs/distribute-licenses-job.yaml +++ b/kube/services/jobs/distribute-licenses-job.yaml @@ -19,6 +19,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never serviceAccountName: hatchery-service-account containers: diff --git a/kube/services/jobs/envtest-job.yaml b/kube/services/jobs/envtest-job.yaml index 6f2c72383..4f6b6d054 100644 --- a/kube/services/jobs/envtest-job.yaml +++ b/kube/services/jobs/envtest-job.yaml @@ -10,6 +10,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: 
eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never automountServiceAccountToken: false containers: diff --git a/kube/services/jobs/es-garbage-job.yaml b/kube/services/jobs/es-garbage-job.yaml index 13385f446..3583d1217 100644 --- a/kube/services/jobs/es-garbage-job.yaml +++ b/kube/services/jobs/es-garbage-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never serviceAccountName: gitops-sa securityContext: diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml index f7ca5fd5b..2b2a00304 100644 --- a/kube/services/jobs/etl-cronjob.yaml +++ b/kube/services/jobs/etl-cronjob.yaml @@ -15,6 +15,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: creds-volume secret: diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml index 8540f3902..43761b7f8 100644 --- a/kube/services/jobs/etl-job.yaml +++ b/kube/services/jobs/etl-job.yaml @@ -10,6 +10,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: creds-volume secret: diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml index bce341aac..20358c6a6 100644 --- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml +++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml @@ -16,6 
+16,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml index bed88c308..f464b690b 100644 --- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml +++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-db-migrate-job.yaml b/kube/services/jobs/fence-db-migrate-job.yaml index f8d2a001c..e954ba116 100644 --- a/kube/services/jobs/fence-db-migrate-job.yaml +++ b/kube/services/jobs/fence-db-migrate-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-delete-expired-clients-job.yaml b/kube/services/jobs/fence-delete-expired-clients-job.yaml index bac613404..1f9a8993b 100644 --- a/kube/services/jobs/fence-delete-expired-clients-job.yaml +++ b/kube/services/jobs/fence-delete-expired-clients-job.yaml @@ -11,6 +11,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: 
eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml index 5409da672..a33b7f2a6 100644 --- a/kube/services/jobs/fence-visa-update-cronjob.yaml +++ b/kube/services/jobs/fence-visa-update-cronjob.yaml @@ -15,6 +15,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-visa-update-job.yaml b/kube/services/jobs/fence-visa-update-job.yaml index a34c9cff7..b5b125c7a 100644 --- a/kube/services/jobs/fence-visa-update-job.yaml +++ b/kube/services/jobs/fence-visa-update-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fencedb-create-job.yaml b/kube/services/jobs/fencedb-create-job.yaml index 7b3417c7e..71789f257 100644 --- a/kube/services/jobs/fencedb-create-job.yaml +++ b/kube/services/jobs/fencedb-create-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/jobs/fluentd-restart-job.yaml b/kube/services/jobs/fluentd-restart-job.yaml index 5c984b7ae..1cdf6e2ec 100644 --- 
a/kube/services/jobs/fluentd-restart-job.yaml +++ b/kube/services/jobs/fluentd-restart-job.yaml @@ -10,6 +10,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never serviceAccountName: fluentd-restart containers: diff --git a/kube/services/jobs/gdcdb-create-job.yaml b/kube/services/jobs/gdcdb-create-job.yaml index 2ceb333b0..14234707a 100644 --- a/kube/services/jobs/gdcdb-create-job.yaml +++ b/kube/services/jobs/gdcdb-create-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml index c95516ca9..843b3e3d5 100644 --- a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml +++ b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND containers: - name: gen3qa-check-bucket-access GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:fix_gen3qa_get_check-| diff --git a/kube/services/jobs/gentestdata-job.yaml b/kube/services/jobs/gentestdata-job.yaml index b0c856e91..78e382f44 100644 --- a/kube/services/jobs/gentestdata-job.yaml +++ b/kube/services/jobs/gentestdata-job.yaml @@ -34,6 +34,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/gitops-sync-job.yaml b/kube/services/jobs/gitops-sync-job.yaml index 6044aff01..a81fd0d6e 100644 --- a/kube/services/jobs/gitops-sync-job.yaml +++ b/kube/services/jobs/gitops-sync-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never serviceAccountName: gitops-sa securityContext: diff --git a/kube/services/jobs/google-bucket-manifest-job.yaml b/kube/services/jobs/google-bucket-manifest-job.yaml index dcd6cd35e..38ed105a3 100644 --- a/kube/services/jobs/google-bucket-manifest-job.yaml +++ b/kube/services/jobs/google-bucket-manifest-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/google-bucket-replicate-job.yaml b/kube/services/jobs/google-bucket-replicate-job.yaml index f61a47868..bc6263a26 100644 --- a/kube/services/jobs/google-bucket-replicate-job.yaml +++ b/kube/services/jobs/google-bucket-replicate-job.yaml @@ -12,6 +12,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/google-create-bucket-job.yaml 
b/kube/services/jobs/google-create-bucket-job.yaml index eed19dfbb..4bc2b41c1 100644 --- a/kube/services/jobs/google-create-bucket-job.yaml +++ b/kube/services/jobs/google-create-bucket-job.yaml @@ -47,6 +47,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-access-cronjob.yaml b/kube/services/jobs/google-delete-expired-access-cronjob.yaml index a491865c3..7132f0379 100644 --- a/kube/services/jobs/google-delete-expired-access-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-access-cronjob.yaml @@ -16,6 +16,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-access-job.yaml b/kube/services/jobs/google-delete-expired-access-job.yaml index 24e00742c..901e0cab2 100644 --- a/kube/services/jobs/google-delete-expired-access-job.yaml +++ b/kube/services/jobs/google-delete-expired-access-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml index cbe8c049c..2106fc9d7 100644 --- 
a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml @@ -17,6 +17,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-service-account-job.yaml b/kube/services/jobs/google-delete-expired-service-account-job.yaml index 99a7f8749..8da478ea4 100644 --- a/kube/services/jobs/google-delete-expired-service-account-job.yaml +++ b/kube/services/jobs/google-delete-expired-service-account-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml index 2453f5009..7571e7f12 100644 --- a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml +++ b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml @@ -17,6 +17,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-init-proxy-groups-job.yaml b/kube/services/jobs/google-init-proxy-groups-job.yaml index b342c7db5..0b57da66c 100644 --- a/kube/services/jobs/google-init-proxy-groups-job.yaml +++ b/kube/services/jobs/google-init-proxy-groups-job.yaml @@ 
-9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-account-access-cronjob.yaml b/kube/services/jobs/google-manage-account-access-cronjob.yaml index 856c3b056..0e5e16d44 100644 --- a/kube/services/jobs/google-manage-account-access-cronjob.yaml +++ b/kube/services/jobs/google-manage-account-access-cronjob.yaml @@ -17,6 +17,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-account-access-job.yaml b/kube/services/jobs/google-manage-account-access-job.yaml index 09259088c..624259d4a 100644 --- a/kube/services/jobs/google-manage-account-access-job.yaml +++ b/kube/services/jobs/google-manage-account-access-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-keys-cronjob.yaml b/kube/services/jobs/google-manage-keys-cronjob.yaml index ee92611ba..7de185099 100644 --- a/kube/services/jobs/google-manage-keys-cronjob.yaml +++ b/kube/services/jobs/google-manage-keys-cronjob.yaml @@ -17,6 +17,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - 
key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-keys-job.yaml b/kube/services/jobs/google-manage-keys-job.yaml index 64773af34..a7454b73b 100644 --- a/kube/services/jobs/google-manage-keys-job.yaml +++ b/kube/services/jobs/google-manage-keys-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml index b8bc21f88..26b290202 100644 --- a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml +++ b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml @@ -17,6 +17,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-verify-bucket-access-group-job.yaml b/kube/services/jobs/google-verify-bucket-access-group-job.yaml index 3f756eaa5..e387ffd59 100644 --- a/kube/services/jobs/google-verify-bucket-access-group-job.yaml +++ b/kube/services/jobs/google-verify-bucket-access-group-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git 
a/kube/services/jobs/graph-create-job.yaml b/kube/services/jobs/graph-create-job.yaml index 6fd859cc2..f1f454e26 100644 --- a/kube/services/jobs/graph-create-job.yaml +++ b/kube/services/jobs/graph-create-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/jobs/hatchery-metrics-job.yaml b/kube/services/jobs/hatchery-metrics-job.yaml index 3a4e571f6..6dece59a3 100644 --- a/kube/services/jobs/hatchery-metrics-job.yaml +++ b/kube/services/jobs/hatchery-metrics-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never serviceAccountName: hatchery-service-account securityContext: diff --git a/kube/services/jobs/hatchery-reaper-job.yaml b/kube/services/jobs/hatchery-reaper-job.yaml index 9278fb727..58a65b573 100644 --- a/kube/services/jobs/hatchery-reaper-job.yaml +++ b/kube/services/jobs/hatchery-reaper-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never serviceAccountName: hatchery-service-account securityContext: diff --git a/kube/services/jobs/healthcheck-cronjob.yaml b/kube/services/jobs/healthcheck-cronjob.yaml index 25888f32c..a9a40598c 100644 --- a/kube/services/jobs/healthcheck-cronjob.yaml +++ b/kube/services/jobs/healthcheck-cronjob.yaml @@ -15,6 +15,16 @@ spec: labels: app: gen3job spec: + affinity: + 
nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND restartPolicy: Never serviceAccountName: jenkins-service containers: diff --git a/kube/services/jobs/indexd-authz-job.yaml b/kube/services/jobs/indexd-authz-job.yaml index a3fbb8658..41ad4a4b8 100644 --- a/kube/services/jobs/indexd-authz-job.yaml +++ b/kube/services/jobs/indexd-authz-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/jobs/indexd-userdb-job.yaml b/kube/services/jobs/indexd-userdb-job.yaml index e018f7a34..57ab5677c 100644 --- a/kube/services/jobs/indexd-userdb-job.yaml +++ b/kube/services/jobs/indexd-userdb-job.yaml @@ -16,6 +16,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index e4f6761f7..d88d12295 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -8,6 +8,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: config-volume-g3auto secret: diff --git a/kube/services/jobs/opencost-report-argo-job.yaml 
b/kube/services/jobs/opencost-report-argo-job.yaml index 0f31eca40..b50c83f78 100644 --- a/kube/services/jobs/opencost-report-argo-job.yaml +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -27,6 +27,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: reports-service-account containers: - name: send-report diff --git a/kube/services/jobs/psql-fix-job.yaml b/kube/services/jobs/psql-fix-job.yaml index 20f453c2a..3e93b77a6 100644 --- a/kube/services/jobs/psql-fix-job.yaml +++ b/kube/services/jobs/psql-fix-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: jenkins-service containers: - name: fix diff --git a/kube/services/jobs/remove-objects-from-clouds-job.yaml b/kube/services/jobs/remove-objects-from-clouds-job.yaml index 46aa3d43f..3f1cf6f1b 100644 --- a/kube/services/jobs/remove-objects-from-clouds-job.yaml +++ b/kube/services/jobs/remove-objects-from-clouds-job.yaml @@ -11,6 +11,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/replicate-validation-job.yaml b/kube/services/jobs/replicate-validation-job.yaml index 13f767d69..28e7bc28e 100644 --- a/kube/services/jobs/replicate-validation-job.yaml +++ b/kube/services/jobs/replicate-validation-job.yaml @@ -11,6 +11,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: aws-cred-volume secret: diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml index 14053492f..9113b4881 100644 --- a/kube/services/jobs/s3sync-cronjob.yaml +++ b/kube/services/jobs/s3sync-cronjob.yaml @@ -21,6 +21,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml index 915f1a588..aa0718260 100644 --- a/kube/services/jobs/usersync-job.yaml +++ b/kube/services/jobs/usersync-job.yaml @@ -31,6 +31,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/useryaml-job.yaml b/kube/services/jobs/useryaml-job.yaml index bf3812951..49cff4854 100644 --- a/kube/services/jobs/useryaml-job.yaml +++ b/kube/services/jobs/useryaml-job.yaml @@ -9,6 +9,16 @@ spec: labels: app: gen3job spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/jupyterhub/jupyterhub-deploy.yaml b/kube/services/jupyterhub/jupyterhub-deploy.yaml index b2b96ff75..293d1169e 100644 --- a/kube/services/jupyterhub/jupyterhub-deploy.yaml +++ 
b/kube/services/jupyterhub/jupyterhub-deploy.yaml @@ -18,6 +18,16 @@ spec: userhelper: "yes" GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: jupyter-service volumes: - name: config-volume diff --git a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml index 936f72520..a2d0c41f0 100644 --- a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml +++ b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml @@ -24,7 +24,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -33,6 +33,15 @@ spec: values: - kayako-wrapper topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/manifestservice/manifestservice-deploy.yaml b/kube/services/manifestservice/manifestservice-deploy.yaml index 52460cfbf..3db33dd7d 100644 --- a/kube/services/manifestservice/manifestservice-deploy.yaml +++ b/kube/services/manifestservice/manifestservice-deploy.yaml @@ -27,7 +27,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -36,6 +36,15 @@ spec: values: - manifestservice topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: 
- name: config-volume diff --git a/kube/services/mariner/mariner-deploy.yaml b/kube/services/mariner/mariner-deploy.yaml index 0912ea705..c151013b5 100644 --- a/kube/services/mariner/mariner-deploy.yaml +++ b/kube/services/mariner/mariner-deploy.yaml @@ -37,7 +37,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -46,6 +46,15 @@ spec: values: - mariner topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: true containers: - name: mariner diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml index c4842dadc..ca8f268b7 100644 --- a/kube/services/metadata/metadata-deploy.yaml +++ b/kube/services/metadata/metadata-deploy.yaml @@ -29,7 +29,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -38,6 +38,15 @@ spec: values: - metadata topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume-g3auto diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml index abb611e39..aaf552389 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml @@ -23,7 +23,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -32,6 +32,15 @@ 
spec: values: - ohdsi-atlas topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: ohdsi-atlas-config-local diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 2f4e57d47..6d82cc691 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -26,7 +26,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -35,6 +35,15 @@ spec: values: - ohdsi-webapi topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: ohdsi-webapi-reverse-proxy-config diff --git a/kube/services/peregrine/peregrine-canary-deploy.yaml b/kube/services/peregrine/peregrine-canary-deploy.yaml index d43698e67..ce5177ddd 100644 --- a/kube/services/peregrine/peregrine-canary-deploy.yaml +++ b/kube/services/peregrine/peregrine-canary-deploy.yaml @@ -27,7 +27,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -36,6 +36,15 @@ spec: values: - peregrine topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: shared-data diff --git 
a/kube/services/peregrine/peregrine-deploy.yaml b/kube/services/peregrine/peregrine-deploy.yaml index e69ef00c4..1c84be131 100644 --- a/kube/services/peregrine/peregrine-deploy.yaml +++ b/kube/services/peregrine/peregrine-deploy.yaml @@ -33,7 +33,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -42,6 +42,15 @@ spec: values: - peregrine topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: shared-data diff --git a/kube/services/pidgin/pidgin-deploy.yaml b/kube/services/pidgin/pidgin-deploy.yaml index 465b4b2f6..f50cf167e 100644 --- a/kube/services/pidgin/pidgin-deploy.yaml +++ b/kube/services/pidgin/pidgin-deploy.yaml @@ -27,7 +27,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -36,6 +36,15 @@ spec: values: - pidgin topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: cert-volume diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index 41a31b157..408c826ab 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -23,7 +23,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -32,6 +32,15 @@ spec: values: - portal topologyKey: "kubernetes.io/hostname" + nodeAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml index e65e12ea1..867133b9e 100644 --- a/kube/services/portal/portal-root-deploy.yaml +++ b/kube/services/portal/portal-root-deploy.yaml @@ -23,7 +23,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -32,6 +32,15 @@ spec: values: - portal topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml index 13b27b878..457452490 100644 --- a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml +++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml @@ -35,7 +35,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -44,6 +44,15 @@ spec: values: - presigned-url-fence topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/qa-dashboard/qa-dashboard-deployment.yaml b/kube/services/qa-dashboard/qa-dashboard-deployment.yaml index 
b61b35058..3bbd17b99 100644 --- a/kube/services/qa-dashboard/qa-dashboard-deployment.yaml +++ b/kube/services/qa-dashboard/qa-dashboard-deployment.yaml @@ -19,6 +19,16 @@ spec: public: "yes" netnolimit: "yes" spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT containers: - name: qa-metrics image: "quay.io/cdis/qa-metrics:latest" diff --git a/kube/services/qabot/qabot-deploy.yaml b/kube/services/qabot/qabot-deploy.yaml index d8423e5bc..c2f9e208c 100644 --- a/kube/services/qabot/qabot-deploy.yaml +++ b/kube/services/qabot/qabot-deploy.yaml @@ -19,6 +19,16 @@ spec: app: qabot netnolimit: "yes" spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND containers: - name: qabot image: "quay.io/cdis/qa-bot:latest" diff --git a/kube/services/requestor/requestor-deploy.yaml b/kube/services/requestor/requestor-deploy.yaml index 6cba99085..3561019e9 100644 --- a/kube/services/requestor/requestor-deploy.yaml +++ b/kube/services/requestor/requestor-deploy.yaml @@ -29,7 +29,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -38,6 +38,15 @@ spec: values: - requestor topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index 5f0f90f3a..d8cfe9f41 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ 
b/kube/services/revproxy/revproxy-deploy.yaml @@ -26,7 +26,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -35,6 +35,15 @@ spec: values: - revproxy topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: revproxy-conf diff --git a/kube/services/selenium/selenium-hub-deployment.yaml b/kube/services/selenium/selenium-hub-deployment.yaml index 35ffe53c7..5c1ba3aa1 100644 --- a/kube/services/selenium/selenium-hub-deployment.yaml +++ b/kube/services/selenium/selenium-hub-deployment.yaml @@ -17,6 +17,16 @@ spec: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND containers: - env: - name: GRID_MAX_SESSION diff --git a/kube/services/selenium/selenium-node-chrome-deployment.yaml b/kube/services/selenium/selenium-node-chrome-deployment.yaml index 45a1fc231..340f87ac1 100644 --- a/kube/services/selenium/selenium-node-chrome-deployment.yaml +++ b/kube/services/selenium/selenium-node-chrome-deployment.yaml @@ -22,6 +22,16 @@ spec: annotations: "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND containers: - env: - name: SE_EVENT_BUS_HOST diff --git a/kube/services/sftp/sftp-deploy.yaml b/kube/services/sftp/sftp-deploy.yaml index 00ad4d8f2..bbb619341 100644 --- 
a/kube/services/sftp/sftp-deploy.yaml +++ b/kube/services/sftp/sftp-deploy.yaml @@ -15,6 +15,16 @@ spec: app: sftp GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: sftp-secret diff --git a/kube/services/sheepdog/sheepdog-canary-deploy.yaml b/kube/services/sheepdog/sheepdog-canary-deploy.yaml index f4568d97a..23a3c9d6a 100644 --- a/kube/services/sheepdog/sheepdog-canary-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-canary-deploy.yaml @@ -26,7 +26,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -35,6 +35,15 @@ spec: values: - sheepdog topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/sheepdog/sheepdog-deploy.yaml b/kube/services/sheepdog/sheepdog-deploy.yaml index 9c0d7e18d..1b579207b 100644 --- a/kube/services/sheepdog/sheepdog-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-deploy.yaml @@ -31,7 +31,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -40,6 +40,15 @@ spec: values: - sheepdog topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git 
a/kube/services/shiny/shiny-deploy.yaml b/kube/services/shiny/shiny-deploy.yaml index c43c31409..48d53e87f 100644 --- a/kube/services/shiny/shiny-deploy.yaml +++ b/kube/services/shiny/shiny-deploy.yaml @@ -20,6 +20,16 @@ spec: public: "yes" GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT volumes: - name: config-volume secret: diff --git a/kube/services/sower/sower-deploy.yaml b/kube/services/sower/sower-deploy.yaml index 3069ee31e..0bc582552 100644 --- a/kube/services/sower/sower-deploy.yaml +++ b/kube/services/sower/sower-deploy.yaml @@ -26,7 +26,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -35,6 +35,15 @@ spec: values: - sower topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND serviceAccountName: sower-service-account volumes: - name: sower-config diff --git a/kube/services/spark/spark-deploy.yaml b/kube/services/spark/spark-deploy.yaml index ebc830be3..da0349f41 100644 --- a/kube/services/spark/spark-deploy.yaml +++ b/kube/services/spark/spark-deploy.yaml @@ -25,7 +25,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -34,6 +34,15 @@ spec: values: - spark topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: containers: diff --git 
a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml index ac2b3246f..4d0b70ab8 100644 --- a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml +++ b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml @@ -29,7 +29,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -38,6 +38,15 @@ spec: values: - ssjdispatcher topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: true volumes: - name: ssjdispatcher-creds-volume diff --git a/kube/services/statsd-exporter/statsd-exporter-deploy.yaml b/kube/services/statsd-exporter/statsd-exporter-deploy.yaml index b608cef28..f39a167f3 100644 --- a/kube/services/statsd-exporter/statsd-exporter-deploy.yaml +++ b/kube/services/statsd-exporter/statsd-exporter-deploy.yaml @@ -22,6 +22,16 @@ spec: app: "statsd-exporter" GEN3_DATE_LABEL spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND containers: - name: "statsd-exporter" GEN3_STATSD-EXPORTER_IMAGE|-image: prom/statsd-exporter:v0.15.0-| diff --git a/kube/services/status-api/status-api-deploy.yaml b/kube/services/status-api/status-api-deploy.yaml index 8c9c28775..763d06a8b 100644 --- a/kube/services/status-api/status-api-deploy.yaml +++ b/kube/services/status-api/status-api-deploy.yaml @@ -19,6 +19,16 @@ spec: app: status-api netnolimit: "yes" spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - 
ONDEMAND containers: - name: status-api image: "quay.io/cdis/status-dashboard:latest" diff --git a/kube/services/superset/superset-deploy.yaml b/kube/services/superset/superset-deploy.yaml index 72b9b88be..473e3c188 100644 --- a/kube/services/superset/superset-deploy.yaml +++ b/kube/services/superset/superset-deploy.yaml @@ -235,6 +235,16 @@ spec: dbsuperset: "yes" public: "yes" spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND securityContext: runAsUser: 0 initContainers: diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml index 00e57076c..b531389d5 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -22,7 +22,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -31,6 +31,15 @@ spec: values: - thor topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false containers: - name: thor diff --git a/kube/services/tty/tty-deploy.yaml b/kube/services/tty/tty-deploy.yaml index 302ac8ed9..c8b8386a9 100644 --- a/kube/services/tty/tty-deploy.yaml +++ b/kube/services/tty/tty-deploy.yaml @@ -26,7 +26,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -35,6 +35,15 @@ spec: values: - tty topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + 
- ONDEMAND serviceAccountName: tty-sa securityContext: fsGroup: 1000 diff --git a/kube/services/tube/tube-deploy.yaml b/kube/services/tube/tube-deploy.yaml index 4eb45d434..dd357a92f 100644 --- a/kube/services/tube/tube-deploy.yaml +++ b/kube/services/tube/tube-deploy.yaml @@ -26,7 +26,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -35,6 +35,15 @@ spec: values: - tube topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/ws-storage/ws-storage-deploy.yaml b/kube/services/ws-storage/ws-storage-deploy.yaml index f7c50b721..48a03be26 100644 --- a/kube/services/ws-storage/ws-storage-deploy.yaml +++ b/kube/services/ws-storage/ws-storage-deploy.yaml @@ -35,7 +35,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -44,6 +44,15 @@ spec: values: - ws-storage topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT volumes: - name: config-volume secret: diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index 81cd6199d..ef950921d 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -33,7 +33,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -42,6 +42,15 @@ spec: values: - wts topologyKey: "kubernetes.io/hostname" + 
nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT terminationGracePeriodSeconds: 10 volumes: - name: wts-secret From b575f4eaae01897f5feba6e7191b626c47087a91 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Tue, 13 Dec 2022 07:07:56 -0600 Subject: [PATCH 029/362] updating the latest fips AMI (#2091) --- gen3/lib/aws.sh | 2 +- tf_files/aws/eks/sample.tfvars | 2 +- tf_files/aws/eks/variables.tf | 2 +- tf_files/aws/modules/eks/variables.tf | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gen3/lib/aws.sh b/gen3/lib/aws.sh index 9dd6e4402..096b95753 100644 --- a/gen3/lib/aws.sh +++ b/gen3/lib/aws.sh @@ -535,7 +535,7 @@ customer_id = "" # Enable/Disable Federal Information Processing Standards (FIPS) in EKS nodes. You need to have FIPS enabled AMI to enable this. fips = false fips_ami_kms = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99" -fips_enabled_ami = "ami-0de87e3680dcb13ec" +fips_enabled_ami = "ami-074d352c8e753fc93" # AZs where to deploy the kubernetes worker nodes. 
availability_zones = ["us-east-1a", "us-east-1c", "us-east-1d"] diff --git a/tf_files/aws/eks/sample.tfvars b/tf_files/aws/eks/sample.tfvars index da176e73e..06b4b309b 100644 --- a/tf_files/aws/eks/sample.tfvars +++ b/tf_files/aws/eks/sample.tfvars @@ -122,7 +122,7 @@ fips = false fips_ami_kms = "arn:aws:kms:us-east-1:707767160287:key/mrk-697897f040ef45b0aa3cebf38a916f99" #This is the FIPS enabled AMI in cdistest account -fips_enabled_ami = "ami-0de87e3680dcb13ec" +fips_enabled_ami = "ami-074d352c8e753fc93" #A list of AZs to be used by EKS nodes availability_zones = ["us-east-1a", "us-east-1c", "us-east-1d"] diff --git a/tf_files/aws/eks/variables.tf b/tf_files/aws/eks/variables.tf index 0dc78a8ab..6adbaad6b 100644 --- a/tf_files/aws/eks/variables.tf +++ b/tf_files/aws/eks/variables.tf @@ -162,7 +162,7 @@ variable "fips_ami_kms" { # This is the FIPS enabled AMI in cdistest account. variable "fips_enabled_ami" { - default = "ami-0de87e3680dcb13ec" + default = "ami-074d352c8e753fc93" } variable "availability_zones" { diff --git a/tf_files/aws/modules/eks/variables.tf b/tf_files/aws/modules/eks/variables.tf index 3eefa456c..2d7cfb5ba 100644 --- a/tf_files/aws/modules/eks/variables.tf +++ b/tf_files/aws/modules/eks/variables.tf @@ -175,5 +175,5 @@ variable "fips_ami_kms" { # This is the FIPS enabled AMI in cdistest account. variable "fips_enabled_ami" { - default = "ami-0de87e3680dcb13ec" + default = "ami-074d352c8e753fc93" } From 3970ac0085619c4d1a003f1c2bd2a4b2191dc7fb Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Tue, 13 Dec 2022 12:54:24 -0600 Subject: [PATCH 030/362] Add fixes to kube-setup-workvm, and simplify setup (#2103) --- .secrets.baseline | 900 ++---------- gen3/bin/kube-setup-workvm.sh | 110 +- package-lock.json | 2450 ++++++++++++++++++++++++--------- 3 files changed, 1967 insertions(+), 1493 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index c0439527e..ee70d5d8c 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-12-06T22:21:29Z", + "generated_at": "2022-12-13T12:32:32Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -74,39 +74,35 @@ "type": "Secret Keyword" } ], - "Docker/Jenkins-CI-Worker/Dockerfile": [ + "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 122, "type": "Secret Keyword" } ], - "Docker/Jenkins-Worker/Dockerfile": [ + "Docker/jenkins/Jenkins-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 136, "type": "Secret Keyword" } ], - "Docker/Jenkins/Dockerfile": [ + "Docker/jenkins/Jenkins/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 110, "type": "Secret Keyword" } ], - "Docker/Jenkins2/Dockerfile": [ + "Docker/jenkins/Jenkins2/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, - "line_number": 113, + "line_number": 110, "type": "Secret Keyword" } ], @@ -1163,22 +1159,6 @@ "type": "Secret Keyword" } ], - "kube/services/ohdsi-atlas/README.md": [ - { - "hashed_secret": "6e71f9f2b1e96de5a712f899ed26477ebc260a73", - "is_secret": false, - "is_verified": false, - "line_number": 105, - "type": "Secret Keyword" - }, - { - 
"hashed_secret": "317b889ca9fa8789dc1b85714568b1bdf2c7baf3", - "is_secret": false, - "is_verified": false, - "line_number": 108, - "type": "Secret Keyword" - } - ], "kube/services/revproxy/helpers.js": [ { "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", @@ -1211,32 +1191,7 @@ "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", "is_secret": false, "is_verified": false, - "line_number": 165, - "type": "Secret Keyword" - }, - { - "hashed_secret": "244f421f896bdcdd2784dccf4eaf7c8dfd5189b5", - "is_secret": false, - "is_verified": false, - "line_number": 260, - "type": "Secret Keyword" - } - ], - "kube/services/superset/superset/superset-deploy.yaml": [ - { - "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", - "is_secret": false, - "is_verified": false, - "line_number": 38, - "type": "Secret Keyword" - } - ], - "kube/services/superset/superset/superset-redis.yaml": [ - { - "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", - "is_secret": false, - "is_verified": false, - "line_number": 169, + "line_number": 166, "type": "Secret Keyword" }, { @@ -1286,927 +1241,246 @@ ], "package-lock.json": [ { - "hashed_secret": "c95b6bc99445e7ed9177040f5ef94d0cdb38fb21", - "is_secret": false, - "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "a896da46c897d3a0d007843006621f78dbcabf51", - "is_secret": false, - "is_verified": false, - "line_number": 19, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "84b662fc9a2a275f90d0afafe6ce08a4d0928ac8", - "is_secret": false, - "is_verified": false, - "line_number": 28, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "6ebe9724873357aaea25e329efb726fa61b843e7", - "is_secret": false, - "is_verified": false, - "line_number": 39, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "f1dbba169db046906924ccd784068a2306096634", - "is_secret": false, - "is_verified": false, - "line_number": 44, - "type": 
"Base64 High Entropy String" - }, - { - "hashed_secret": "2c7bd6cdc39b5b8a0f32aa11988a0ec769526cdb", - "is_secret": false, - "is_verified": false, - "line_number": 52, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "1addd61f68d977408128e530959437821a6d8b66", - "is_secret": false, - "is_verified": false, - "line_number": 57, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "9787d966f19a0d8d0021b31d34cfdfcebdb9c28a", - "is_secret": false, - "is_verified": false, - "line_number": 65, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "76693e518c3c8123e9a197821b506292322a0a95", - "is_secret": false, - "is_verified": false, - "line_number": 70, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "fa83dcbf0f435ee38066d19a2a43815510f96bc4", - "is_secret": false, - "is_verified": false, - "line_number": 86, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "017a7eab3d63331ecfe768927c8907a5a31888e5", - "is_secret": false, - "is_verified": false, - "line_number": 91, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "92b56edda4f2906f548fe77c015490e6ba2ee4c3", - "is_secret": false, - "is_verified": false, - "line_number": 96, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "936b0959aa13f1decc76be1d80acaac0860847b7", - "is_secret": false, - "is_verified": false, - "line_number": 101, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "4bad86c43b7cd06efc130272d8e4de2b32636371", - "is_secret": false, - "is_verified": false, - "line_number": 109, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "d11716ecfa623706b733654d78f4e7af3c117efa", - "is_secret": false, - "is_verified": false, - "line_number": 143, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "0cc93dfdf4ae08bc374b99af985b25d2427f71d8", - "is_secret": false, - "is_verified": false, - "line_number": 148, - "type": "Base64 High Entropy String" - }, - { 
- "hashed_secret": "80f8d53f3fedde239f695d6a4c44c78b4aff0a44", - "is_secret": false, - "is_verified": false, - "line_number": 153, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "83307cb75a4a44ba528f4a0aefcec2a8018dc6d8", - "is_secret": false, - "is_verified": false, - "line_number": 158, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "c96d81662cc7919208154e7152fa0033391b7bcd", - "is_secret": false, - "is_verified": false, - "line_number": 166, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "7156492f40fb2479a45780b3d2959c29b27b6374", - "is_secret": false, - "is_verified": false, - "line_number": 181, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "885304335818f51938422166d361cddacfd626d0", - "is_secret": false, - "is_verified": false, - "line_number": 186, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "915ca894a8ec19ffcd55555e6c8daac1fe882751", - "is_secret": false, - "is_verified": false, - "line_number": 191, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "7ea379a1bf787a21401c8c39f285e4e84b478d72", - "is_secret": false, - "is_verified": false, - "line_number": 201, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "8e948a3b773d1a2e4b6f4220216efa734315246d", - "is_secret": false, - "is_verified": false, - "line_number": 209, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "1a321d0b0d9b6d75888ce7ae121ac222cec1eddd", - "is_secret": false, - "is_verified": false, - "line_number": 217, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "1a6bfe25744ad6c6ce27c3a52dbd98c15be12a5c", - "is_secret": false, - "is_verified": false, - "line_number": 222, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "04450eaacfa844f84926d04d6a07534cde99b28e", - "is_secret": false, - "is_verified": false, - "line_number": 227, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": 
"b4c295435d09bbdfb91ced9040379166d67ccbd2", - "is_secret": false, - "is_verified": false, - "line_number": 232, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "bb2bf296d6e086b471d45a26af9fd57f55289a75", - "is_secret": false, - "is_verified": false, - "line_number": 237, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "9579b6a23d94d56f2f163233b716d8752e6b3bde", - "is_secret": false, - "is_verified": false, - "line_number": 256, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "796925256bc0f4dc43cdfab7fbff852eace18f42", - "is_secret": false, - "is_verified": false, - "line_number": 287, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "7e280af4ec2d573144d98e89ed2e1dfd817ca48f", - "is_secret": false, - "is_verified": false, - "line_number": 295, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "941b3e7836a6f26d32311893ac5d9ad0a52c45ca", - "is_secret": false, - "is_verified": false, - "line_number": 300, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "34743e1f7d9541c4a726b998f20baf828c694213", - "is_secret": false, + "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", "is_verified": false, - "line_number": 305, + "line_number": 1481, "type": "Base64 High Entropy String" }, { - "hashed_secret": "c4fea87bd49c4427d7215d57ada9ff3177e0c471", - "is_secret": false, + "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", "is_verified": false, - "line_number": 310, + "line_number": 1489, "type": "Base64 High Entropy String" }, { - "hashed_secret": "85324324e21d0dfbfb5248ac92fa0f289d2e25f8", - "is_secret": false, + "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", "is_verified": false, - "line_number": 315, + "line_number": 1521, "type": "Base64 High Entropy String" }, { - "hashed_secret": "19eea0e64f6a3311b04e472035df10c23f23dd0a", - "is_secret": false, + "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", "is_verified": false, - 
"line_number": 352, + "line_number": 1547, "type": "Base64 High Entropy String" }, { - "hashed_secret": "acce4ef8d841ffa646256da3af7b79ad5cb78158", - "is_secret": false, + "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", "is_verified": false, - "line_number": 364, + "line_number": 1611, "type": "Base64 High Entropy String" }, { - "hashed_secret": "22e7ae9b65ade417baac61e6f0d84a54783ba759", - "is_secret": false, + "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", "is_verified": false, - "line_number": 369, + "line_number": 1640, "type": "Base64 High Entropy String" }, { - "hashed_secret": "8e71b7828c7c554f05dbbabddd63301b5fc56771", - "is_secret": false, + "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", "is_verified": false, - "line_number": 374, + "line_number": 1648, "type": "Base64 High Entropy String" }, { - "hashed_secret": "fea0d9c5b0c53c41e6a0a961a49cccc170847120", + "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", "is_secret": false, "is_verified": false, - "line_number": 379, + "line_number": 1664, "type": "Base64 High Entropy String" }, { - "hashed_secret": "ebe2160ede628e0faeac9fe70c215cd38d28d8f6", - "is_secret": false, + "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", "is_verified": false, - "line_number": 384, + "line_number": 1683, "type": "Base64 High Entropy String" }, { - "hashed_secret": "9cb2b0347722893cde39bbe83f9df7c3c6e1b7c3", - "is_secret": false, + "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", "is_verified": false, - "line_number": 398, + "line_number": 1733, "type": "Base64 High Entropy String" }, { - "hashed_secret": "344e37e02a35dd31cc7dc945b7fe7b2da88344c0", - "is_secret": false, + "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", "is_verified": false, - "line_number": 403, + "line_number": 1742, "type": "Base64 High Entropy String" }, { - "hashed_secret": "31a41817127c8d2b7b304c326b05d7319934e7a6", - "is_secret": false, + "hashed_secret": 
"a7af5768a6d936e36f28e1030d7f894d7aaf555e", "is_verified": false, - "line_number": 413, + "line_number": 1755, "type": "Base64 High Entropy String" }, { - "hashed_secret": "150852e9f1e877547306d59618a136fb535b40e3", - "is_secret": false, + "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", "is_verified": false, - "line_number": 418, + "line_number": 1769, "type": "Base64 High Entropy String" }, { - "hashed_secret": "277e32c5ba00ef90c6f76c7004fde2ecac6d2e18", - "is_secret": false, + "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", "is_verified": false, - "line_number": 423, + "line_number": 1855, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b95e69c7f4328ea641952f875c3b079a1585c9d1", - "is_secret": false, + "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", "is_verified": false, - "line_number": 431, + "line_number": 1889, "type": "Base64 High Entropy String" }, { - "hashed_secret": "6b30fe731c8444c0263b57aacbdaedb771ec01a5", - "is_secret": false, + "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", "is_verified": false, - "line_number": 436, + "line_number": 1894, "type": "Base64 High Entropy String" }, { - "hashed_secret": "98eafa06e0c7e089c19e79dedf5989c3eb2f0568", + "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", "is_secret": false, "is_verified": false, - "line_number": 445, + "line_number": 1921, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bf47364c2d4ad0308ef016fe4a89f6c7dc21ef86", - "is_secret": false, + "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", "is_verified": false, - "line_number": 464, + "line_number": 1950, "type": "Base64 High Entropy String" }, { - "hashed_secret": "3e6c18abd5b90c63da0bd8b4c0d3a142e3d5a83d", - "is_secret": false, + "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", "is_verified": false, - "line_number": 474, + "line_number": 1983, "type": "Base64 High Entropy String" }, { - "hashed_secret": 
"209bf9cfe9000c6851cd4f94165d30ee1cd3dca1", - "is_secret": false, + "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", "is_verified": false, - "line_number": 482, + "line_number": 2004, "type": "Base64 High Entropy String" }, { - "hashed_secret": "cf09cb791688fe019284bfdc362abc41918645a5", - "is_secret": false, + "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", "is_verified": false, - "line_number": 487, + "line_number": 2013, "type": "Base64 High Entropy String" }, { - "hashed_secret": "6c1392daf02b9ba2a21c49c82508048525d5bc4b", - "is_secret": false, + "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", "is_verified": false, - "line_number": 492, + "line_number": 2046, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b4e2bf4f3a071b223da2f270d5a2348d65105d3e", - "is_secret": false, + "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", "is_verified": false, - "line_number": 497, + "line_number": 2078, "type": "Base64 High Entropy String" }, { - "hashed_secret": "98d583792218c3c06ecbcac66e5bedcdaabd63e7", - "is_secret": false, + "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", "is_verified": false, - "line_number": 507, + "line_number": 2083, "type": "Base64 High Entropy String" }, { - "hashed_secret": "575c9b4e0765ae6ab9a4f38eb1186ea361691f73", + "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", "is_secret": false, "is_verified": false, - "line_number": 514, + "line_number": 2111, "type": "Base64 High Entropy String" }, { - "hashed_secret": "16225dde2ec301d038a0bdbda68de4a174fbfdd0", - "is_secret": false, + "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", "is_verified": false, - "line_number": 519, + "line_number": 2138, "type": "Base64 High Entropy String" }, { - "hashed_secret": "80d73b6f7e87f07e3ae70ef1e692aa9569574551", - "is_secret": false, + "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", "is_verified": false, - "line_number": 524, + "line_number": 2143, 
"type": "Base64 High Entropy String" }, { - "hashed_secret": "38952752ebde485c02a80bff1d81ebe95664bcca", - "is_secret": false, + "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", "is_verified": false, - "line_number": 529, + "line_number": 2150, "type": "Base64 High Entropy String" }, { - "hashed_secret": "150b60d278251f2470dd690016afe038bc1bb7f1", - "is_secret": false, + "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", "is_verified": false, - "line_number": 534, + "line_number": 2188, "type": "Base64 High Entropy String" }, { - "hashed_secret": "535582d92da3a4158e592ec29868bfd8467b8bce", - "is_secret": false, + "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", "is_verified": false, - "line_number": 539, + "line_number": 2291, "type": "Base64 High Entropy String" }, { - "hashed_secret": "23b096d9b48ed5d9a778d3db5807c5c7a2357c93", - "is_secret": false, + "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", "is_verified": false, - "line_number": 544, + "line_number": 2303, "type": "Base64 High Entropy String" }, { - "hashed_secret": "127f92724797904fb4e6de2dfff2c71c07739612", - "is_secret": false, + "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", "is_verified": false, - "line_number": 549, + "line_number": 2308, "type": "Base64 High Entropy String" }, { - "hashed_secret": "f74b21c2fc87ad48118b3723372ecfe25aaae730", - "is_secret": false, + "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", "is_verified": false, - "line_number": 559, + "line_number": 2317, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bc788b9febb8e95114c2e78a9d5297f80bbedb2c", - "is_secret": false, + "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", "is_verified": false, - "line_number": 564, + "line_number": 2327, "type": "Base64 High Entropy String" }, { - "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", - "is_secret": false, + "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", 
"is_verified": false, - "line_number": 575, + "line_number": 2332, "type": "Base64 High Entropy String" }, { - "hashed_secret": "36a64bd1be32f031420a87c448636720426e0072", - "is_secret": false, + "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", "is_verified": false, - "line_number": 580, + "line_number": 2340, "type": "Base64 High Entropy String" }, { - "hashed_secret": "06a3dc8802aa9b4f2f48ad081cbe64482ce9f491", - "is_secret": false, + "hashed_secret": "eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", "is_verified": false, - "line_number": 585, + "line_number": 2349, "type": "Base64 High Entropy String" }, { - "hashed_secret": "6c8453f18e4aa0280d847454c9a803c12e2d14d7", - "is_secret": false, + "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", "is_verified": false, - "line_number": 590, + "line_number": 2357, "type": "Base64 High Entropy String" }, { - "hashed_secret": "3df46004e168f8d8e3422adfbf0b7c237a41f437", - "is_secret": false, + "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", "is_verified": false, - "line_number": 595, + "line_number": 2369, "type": "Base64 High Entropy String" }, { - "hashed_secret": "5c270f653b2fcd5b7c700b53f8543df4147a4aba", - "is_secret": false, - "is_verified": false, - "line_number": 600, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "98a159a135963e5e65a546879c332b2c3942aec3", - "is_secret": false, - "is_verified": false, - "line_number": 605, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "58d846ede841bbec0d67a42d03426806635fee2f", - "is_secret": false, - "is_verified": false, - "line_number": 610, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "23e42656fba130d56c20abddb94b6b7bfcad69a8", - "is_secret": false, - "is_verified": false, - "line_number": 618, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "f883f0bd87d8455814f491e2067bd3f62454c7c2", - "is_secret": false, - "is_verified": false, - "line_number": 623, - "type": 
"Base64 High Entropy String" - }, - { - "hashed_secret": "8ece0f01da9189bae69a60da116040400bbc10e5", - "is_secret": false, - "is_verified": false, - "line_number": 628, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "75a3c0b9934bd460ff7af9763edb25d749ab7b4e", - "is_secret": false, - "is_verified": false, - "line_number": 633, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "baac57cb314beab87420d1da6906a1d2377c7d73", - "is_secret": false, - "is_verified": false, - "line_number": 638, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "d0a953de593a0a7b26b925a6476d8382cd31cb0e", - "is_secret": false, - "is_verified": false, - "line_number": 654, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "8b15238d25347ab18f4cbbe191de9aed597c8ea4", - "is_secret": false, - "is_verified": false, - "line_number": 659, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "1e2ab7a2fd9b6afcbe08afcb9dc652b76cf367d8", - "is_secret": false, - "is_verified": false, - "line_number": 668, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "ae745d719f97b3ddb9791348b1f29ff8208c0c5c", - "is_secret": false, - "is_verified": false, - "line_number": 676, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "b72a53c8bebd6540eeffeba5b0c28965bbb2a664", - "is_secret": false, - "is_verified": false, - "line_number": 681, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "97cbb7fbdfe498c80489e26bcdc78fce5db9b258", - "is_secret": false, - "is_verified": false, - "line_number": 686, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "bc98c415b1c6ee93adf8e97a4a536b6342337c19", - "is_secret": false, - "is_verified": false, - "line_number": 691, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "5a6baaacb03a030567b857cb8cfe440407e6385e", - "is_secret": false, - "is_verified": false, - "line_number": 696, - "type": "Base64 High Entropy String" - 
}, - { - "hashed_secret": "e55a8322e5c7485be2f721155d9ed15afc586a4c", - "is_secret": false, - "is_verified": false, - "line_number": 705, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "47709a15a1b02a87f65dfcd5f3e78e0d2206c95f", - "is_secret": false, - "is_verified": false, - "line_number": 710, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "5782d0f39536b22f2c6aa29d3b815a57f43e4800", - "is_secret": false, - "is_verified": false, - "line_number": 719, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "401f90e6afa890c5ee44071351e4a149e7c1f5e0", - "is_secret": false, - "is_verified": false, - "line_number": 724, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "51f38b23af543da8b637a3bd62f5fb2c460e3b3d", - "is_secret": false, - "is_verified": false, - "line_number": 729, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "8287678ab8009ae16b02930c9e260d1f28578fbe", - "is_secret": false, - "is_verified": false, - "line_number": 734, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "d4c050e6914eb68a5c657fb8bb09f6ac5eae1e86", - "is_secret": false, - "is_verified": false, - "line_number": 739, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "922ac7db4914c20910496a41c474631928d6c2f2", - "is_secret": false, - "is_verified": false, - "line_number": 750, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", - "is_secret": false, - "is_verified": false, - "line_number": 771, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "d7966031d8525b080d7234049cbb040ac9a3f908", - "is_secret": false, - "is_verified": false, - "line_number": 798, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "ff3d359d573f78d89424e03ec8688eee19305f9f", - "is_secret": false, - "is_verified": false, - "line_number": 808, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": 
"949b4ff40f26797f9290fe46eaa8691caef5c5ab", - "is_secret": false, - "is_verified": false, - "line_number": 817, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "ce4ea19f66e9140bdb497b19c6ae94c32ee565f0", - "is_secret": false, - "is_verified": false, - "line_number": 825, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "f6368525e9e22577efc8d8b737794e845958ba92", - "is_secret": false, - "is_verified": false, - "line_number": 834, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "1508bbaf29927b5348d4df62823dab122a0d3b48", - "is_secret": false, - "is_verified": false, - "line_number": 839, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "12917e7235ce486ca51a296b896afa5e3b4fda54", - "is_secret": false, - "is_verified": false, - "line_number": 844, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "49e05eb75fd04d8f44cf235d4e8eddc30a2b93e5", - "is_secret": false, - "is_verified": false, - "line_number": 849, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "aa8ea120ddc5aaa27cb02e0b04ac1c53b249a724", - "is_secret": false, - "is_verified": false, - "line_number": 869, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "b3e00452fd69737cc747d0661fa3b3949a4a0805", - "is_secret": false, - "is_verified": false, - "line_number": 876, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "af2ceb518ddc689b0e2a03ffebb64d4499817c17", - "is_secret": false, - "is_verified": false, - "line_number": 887, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "7da94b235f996b5c65b66c3e70b5eeaf97bab5d4", - "is_secret": false, - "is_verified": false, - "line_number": 892, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "f8363d7113ba35fd06b33afe20c8ad21a3202197", - "is_secret": false, - "is_verified": false, - "line_number": 900, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": 
"6902b24068ea12c3a3e31596614aa6fa0fba3c39", - "is_secret": false, - "is_verified": false, - "line_number": 908, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "2c732c0a0dccfc1588888172188ce9a1abb7166e", - "is_secret": false, - "is_verified": false, - "line_number": 916, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "c59aac9ab2704f627d29c762e716ba84b15be3f1", - "is_secret": false, - "is_verified": false, - "line_number": 921, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "20249a3c96028e5ad19143d86ec5d2ee233935ed", - "is_secret": false, - "is_verified": false, - "line_number": 937, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "2a57a9814486d6f83257ec94e65d1024819611b8", - "is_secret": false, - "is_verified": false, - "line_number": 942, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "d5e822897b1f37e6ce1a864e2ba9af8f9bfc5539", - "is_secret": false, - "is_verified": false, - "line_number": 950, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "dbee1beb29275ad50ef0a68067ca144985beca2c", - "is_secret": false, - "is_verified": false, - "line_number": 957, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "b0cb4b5554183f2c7bc1ca25d902db5769798a7a", - "is_secret": false, - "is_verified": false, - "line_number": 962, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "29f79b77802802c5ae2d3c2acb9179280de37914", - "is_secret": false, - "is_verified": false, - "line_number": 967, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "18469023a89dd192b5275d8b955c9fd2202e0c03", - "is_secret": false, - "is_verified": false, - "line_number": 983, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "0d3ce7468071b4e48ba9cd014ade7037dc57ef41", - "is_secret": false, - "is_verified": false, - "line_number": 991, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": 
"955d2d24c472b4eb0b4488f935a0f65e38001df8", - "is_secret": false, - "is_verified": false, - "line_number": 996, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "42e05c82cd06a9ed1d15e0f472c2efc4b3254cae", - "is_secret": false, - "is_verified": false, - "line_number": 1010, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "7a87fb248397359e9c6ca6e46f39805789059102", - "is_secret": false, - "is_verified": false, - "line_number": 1018, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "7fbf450bf4ee54f013454f70af3a9743c0909f54", - "is_secret": false, - "is_verified": false, - "line_number": 1034, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "df8e0babfad52a541f6e470cf3a143402c2c2a1e", - "is_secret": false, - "is_verified": false, - "line_number": 1039, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", - "is_secret": false, - "is_verified": false, - "line_number": 1044, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "9e897caf5658aea914e1034f46663cadb5a76348", - "is_secret": false, - "is_verified": false, - "line_number": 1054, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "3aec99f39b829f94874ccd0a0d90315c6690cb94", - "is_secret": false, - "is_verified": false, - "line_number": 1064, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "eca5fc6e4f5f895143d3fcedefc42dfe6e79f918", - "is_secret": false, - "is_verified": false, - "line_number": 1069, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "307a947aa422c67fdefb07178198a004fb2c0d94", - "is_secret": false, - "is_verified": false, - "line_number": 1074, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "0ba2fc9a137313ae1fdda2b5476dedf0595bda3a", - "is_secret": false, + "hashed_secret": "3f0c251b9c2c21454445a98fde6915ceacde2136", "is_verified": false, - "line_number": 1083, + "line_number": 2387, 
"type": "Base64 High Entropy String" } ], diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh index 4b47be0fa..fd4d9206e 100644 --- a/gen3/bin/kube-setup-workvm.sh +++ b/gen3/bin/kube-setup-workvm.sh @@ -6,6 +6,7 @@ # s3_bucket="${s3_bucket:-${2:-unknown}}" +export DEBIAN_FRONTEND=noninteractive # Make it easy to run this directly ... _setup_workvm_dir="$(dirname -- "${BASH_SOURCE:-$0}")" @@ -29,15 +30,16 @@ fi if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then # -E passes through *_proxy environment - sudo -E apt-get update - sudo -E apt-get install -y git jq pwgen python-dev python-pip unzip python3-dev python3-pip python3-venv + gen3_log_info "Install git jq pwgen unzip python3-dev python3-pip python3-venv libpq-dev apt-transport-https ca-certificates gnupg apt-utils" + sudo -E apt-get update -qq + sudo -E apt-get install -qq -y git jq pwgen unzip python3-dev python3-pip python3-venv libpq-dev apt-transport-https ca-certificates gnupg apt-utils > /dev/null ( # subshell # install aws cli v2 - https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html # increase min version periodically - see https://github.com/aws/aws-cli/blob/v2/CHANGELOG.rst update_awscli() { local version="0.0.0" - if aws --version; then + if aws --version > /dev/null 2>&1; then version="$(aws --version | awk '{ print $1 }' | awk -F / '{ print $2 }')" fi if semver_ge "$version" "2.7.0"; then @@ -46,6 +48,7 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then fi # update to latest version ( # subshell + gen3_log_info "Installing aws cli" export DEBIAN_FRONTEND=noninteractive if [[ -f /usr/local/bin/aws ]] && ! 
semver_ge "$version" "2.7.0"; then sudo rm /usr/local/bin/aws @@ -54,13 +57,14 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then temp_dir="aws_install-$(date +%m%d%Y)" mkdir $temp_dir cd $temp_dir - curl -o awscli.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip - unzip awscli.zip + curl -s -o awscli.zip https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m || "x86_64").zip + unzip -qq awscli.zip if semver_ge "$version" "2.7.0"; then yes | sudo ./aws/install --update else yes | sudo ./aws/install fi + aws --version # cleanup cd $HOME rm -rf $temp_dir @@ -70,18 +74,23 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then update_awscli ) - sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install --upgrade pip + gen3_log_info "Upgrading pip.." + sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install -q --upgrade pip + + gen3_log_info "Installing jinja2 via pip" + # jinja2 needed by render_creds.py - sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install jinja2 - # yq === jq for yaml - sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install yq + sudo -E XDG_CACHE_HOME=/var/cache python3 -m pip install -q jinja2 yq --ignore-installed + # install nodejs - if ! which node > /dev/null 2>&1; then - curl -sL https://deb.nodesource.com/setup_12.x | sudo -E bash - - sudo -E apt-get update - sudo -E apt-get install -y nodejs - fi + gen3_log_info "Install node js 16" + curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash - > /dev/null + sudo apt install -qq -y nodejs > /dev/null + + gen3_log_info "Node: Version $(node --version)" + + if [[ ! -f /etc/apt/sources.list.d/google-cloud-sdk.list ]]; then # might need to uninstall gcloud installed from ubuntu repo if which gcloud > /dev/null 2>&1; then @@ -89,7 +98,8 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then fi fi if ! 
which psql > /dev/null 2>&1; then - ( + ( + gen3_log_info "Install postgres-client" # use the postgres dpkg server # https://www.postgresql.org/download/linux/ubuntu/ DISTRO="$(lsb_release -c -s)" # ex - xenial @@ -97,32 +107,31 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" | sudo tee /etc/apt/sources.list.d/pgdg.list fi wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - - sudo -E apt-get update - sudo -E apt-get install -y postgresql-client-13 + sudo -E apt-get -qq update + sudo -E apt-get install -qq -y postgresql-client-13 > /dev/null ) fi - # gen3sdk currently requires this - sudo -E apt-get install -y libpq-dev + if ! which gcloud > /dev/null 2>&1; then ( - export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" - sudo -E bash -c "echo 'deb https://packages.cloud.google.com/apt $CLOUD_SDK_REPO main' > /etc/apt/sources.list.d/google-cloud-sdk.list" - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo -E apt-key add - - sudo -E apt-get update - sudo -E apt-get install -y google-cloud-sdk \ - google-cloud-sdk-cbt + gen3_log_info "Install google cloud cli" + sudo -E bash -c "echo 'deb https://packages.cloud.google.com/apt cloud-sdk main' | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list" + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo -E apt-key add - + curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key --keyring /usr/share/keyrings/cloud.google.gpg add - + sudo -E apt-get update -qq + sudo -E apt-get install -qq -y google-cloud-sdk \ + google-cloud-sdk-cbt > /dev/null + ) + fi - k8s_server_version=$(kubectl version --short | awk -F[v.] '/Server/ {print $3"."$4}') - if [[ ! 
-z "${k8s_server_version// }" ]]; then - # install kubectl - install_version=$(apt-cache madison kubectl | awk '$3 ~ /'$k8s_server_version'/ {print $3}'| head -n 1) - gen3_log_info "Installing kubectl version $install_version" - sudo -E apt-get install -y kubectl=$install_version --allow-downgrades - else - # install kubectl - sudo -E apt-get install -y kubectl=1.21.14-00 --allow-downgrades + if ! which kubectl > /dev/null 2>&1; then + gen3_log_info "Installing kubectl" + sudo -E apt-get install -qq -y kubectl > /dev/null + else + gen3_log_info "Upgrading kubectl" + sudo -E apt-get upgrade -qq -y kubectl > /dev/null fi mkdir -p ~/.config @@ -130,26 +139,29 @@ if sudo -n true > /dev/null 2>&1 && [[ $(uname -s) == "Linux" ]]; then ( # in a subshell - install terraform install_terraform() { - curl -o "${XDG_RUNTIME_DIR}/terraform.zip" https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip + gen3_log_info "Installing terraform 0.11" + curl -s -o "${XDG_RUNTIME_DIR}/terraform.zip" https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip sudo /bin/rm -rf /usr/local/bin/terraform > /dev/null 2>&1 || true - sudo unzip "${XDG_RUNTIME_DIR}/terraform.zip" -d /usr/local/bin; + sudo unzip -qq "${XDG_RUNTIME_DIR}/terraform.zip" -d /usr/local/bin; /bin/rm "${XDG_RUNTIME_DIR}/terraform.zip" } install_terraform12() { + gen3_log_info "Installing terraform 0.12" mkdir "${XDG_RUNTIME_DIR}/t12" - curl -o "${XDG_RUNTIME_DIR}/t12/terraform12.zip" https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip + curl -s -o "${XDG_RUNTIME_DIR}/t12/terraform12.zip" https://releases.hashicorp.com/terraform/0.12.31/terraform_0.12.31_linux_amd64.zip sudo /bin/rm -rf /usr/local/bin/terraform12 > /dev/null 2>&1 || true - unzip "${XDG_RUNTIME_DIR}/t12/terraform12.zip" -d "${XDG_RUNTIME_DIR}/t12"; + unzip -qq "${XDG_RUNTIME_DIR}/t12/terraform12.zip" -d "${XDG_RUNTIME_DIR}/t12"; sudo cp "${XDG_RUNTIME_DIR}/t12/terraform" 
"/usr/local/bin/terraform12" /bin/rm -rf "${XDG_RUNTIME_DIR}/t12" } install_terraform1.2() { + gen3_log_info "Installing terraform 1.2" mkdir "${XDG_RUNTIME_DIR}/t1.2" - curl -o "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" https://releases.hashicorp.com/terraform/1.2.3/terraform_1.2.3_linux_amd64.zip + curl -s -o "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" https://releases.hashicorp.com/terraform/1.2.3/terraform_1.2.3_linux_amd64.zip sudo /bin/rm -rf /usr/local/bin/terraform1.2 > /dev/null 2>&1 || true - unzip "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" -d "${XDG_RUNTIME_DIR}/t1.2"; + unzip -qq "${XDG_RUNTIME_DIR}/t1.2/terraform1.2.zip" -d "${XDG_RUNTIME_DIR}/t1.2"; sudo cp "${XDG_RUNTIME_DIR}/t1.2/terraform" "/usr/local/bin/terraform1.2" /bin/rm -rf "${XDG_RUNTIME_DIR}/t1.2" } @@ -213,8 +225,9 @@ EOM ) fi if ! which packer > /dev/null 2>&1; then - curl -o "${XDG_RUNTIME_DIR}/packer.zip" https://releases.hashicorp.com/packer/1.5.1/packer_1.5.1_linux_amd64.zip - sudo unzip "${XDG_RUNTIME_DIR}/packer.zip" -d /usr/local/bin + gen3_log_info "Installing packer" + curl -s -o "${XDG_RUNTIME_DIR}/packer.zip" https://releases.hashicorp.com/packer/1.5.1/packer_1.5.1_linux_amd64.zip + sudo unzip -qq "${XDG_RUNTIME_DIR}/packer.zip" -d /usr/local/bin /bin/rm "${XDG_RUNTIME_DIR}/packer.zip" fi # https://docs.aws.amazon.com/eks/latest/userguide/install-aws-iam-authenticator.html @@ -222,23 +235,16 @@ EOM ( gen3_log_info "installing aws-iam-authenticator" cd /usr/local/bin - sudo curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator + sudo curl -s -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.8/2020-09-18/bin/linux/amd64/aws-iam-authenticator sudo chmod a+rx ./aws-iam-authenticator - sudo rm /usr/local/bin/heptio-authenticator-aws || true - # link heptio-authenticator-aws for backward compatability with old scripts - sudo ln -s /usr/local/bin/aws-iam-authenticator 
heptio-authenticator-aws ) fi ( # in a subshell install helm install_helm() { helm_release_URL="https://get.helm.sh/helm-v3.4.0-linux-amd64.tar.gz" - curl -o "${XDG_RUNTIME_DIR}/helm.tar.gz" ${helm_release_URL} + curl -s -o "${XDG_RUNTIME_DIR}/helm.tar.gz" ${helm_release_URL} tar xf "${XDG_RUNTIME_DIR}/helm.tar.gz" -C ${XDG_RUNTIME_DIR} sudo mv -f "${XDG_RUNTIME_DIR}/linux-amd64/helm" /usr/local/bin - - # helm3 has no default repo, need to add it manually - helm repo add stable https://charts.helm.sh/stable --force-update - helm repo update } migrate_helm() { @@ -354,3 +360,5 @@ fi npm install || true fi ) + +source ${WORKSPACE}/.${RC_FILE} \ No newline at end of file diff --git a/package-lock.json b/package-lock.json index f88a979b5..69c298911 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,14 +1,445 @@ { "name": "@gen3/cloud-automation", "version": "1.0.0", - "lockfileVersion": 1, + "lockfileVersion": 2, "requires": true, - "dependencies": { - "@fast-csv/format": { + "packages": { + "": { + "name": "@gen3/cloud-automation", + "version": "1.0.0", + "license": "Apache-2.0", + "dependencies": { + "ansi-regex": "^6.0.1", + "async": "^3.2.2", + "aws-sdk": "^2.814.0", + "elasticdump": "^6.84.1", + "express": "^4.17.1", + "json-schema": "^0.4.0", + "minimatch": "^3.0.5", + "minimist": "^1.2.6", + "requestretry": "^7.0.0" + }, + "devDependencies": {} + }, + "node_modules/accepts": { + "version": "1.3.8", + "license": "MIT", + "dependencies": { + "mime-types": "~2.1.34", + "negotiator": "0.6.3" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/ajv": {}, + "node_modules/ansi-regex": { + "version": "6.0.1", + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/array-flatten": { + "version": "1.1.1", + "license": "MIT" + }, + "node_modules/assert-plus": { + "version": "1.0.0", + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + 
"node_modules/async": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/async/-/async-3.2.4.tgz", + "integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "license": "MIT" + }, + "node_modules/aws-sdk": { + "version": "2.1273.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1273.0.tgz", + "integrity": "sha512-QF37fm1DfUxjw+IJtDMTDBckVwAOf8EHQjs4NxJp5TtRkeqtWkxNzq/ViI8kAS+0n8JZaom8Oenmy8ufGfLMAQ==", + "dependencies": { + "buffer": "4.9.2", + "events": "1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.16.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "util": "^0.12.4", + "uuid": "8.0.0", + "xml2js": "0.4.19" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/aws-sign2": { + "version": "0.7.0", + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/aws4": { + "version": "1.11.0", + "license": "MIT" + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/big.js": { + "version": "5.2.2", + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/body-parser": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "dependencies": { + "bytes": "3.1.2", + "content-type": "~1.0.4", + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "on-finished": "2.4.1", + "qs": "6.11.0", + 
"raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/buffer": { + "version": "4.9.2", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-4.9.2.tgz", + "integrity": "sha512-xq+q3SRMOxGivLhBNaUdC64hDTQwejJ+H0T/NB1XMtTVEwNTrfFF3gAxiyW0Bu/xWEGhjVKgUcMhCrUy2+uCWg==", + "dependencies": { + "base64-js": "^1.0.2", + "ieee754": "^1.1.4", + "isarray": "^1.0.0" + } + }, + "node_modules/buffer-queue": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/bytes": { + "version": "3.1.2", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/caseless": { + "version": "0.12.0", + "license": "Apache-2.0" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "license": "MIT" + }, + "node_modules/content-disposition": { + "version": "0.5.4", + "license": "MIT", + "dependencies": { + "safe-buffer": "5.2.1" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/content-type": { + "version": "1.0.4", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie": { + "version": "0.5.0", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/cookie-signature": { + "version": "1.0.6", + "license": "MIT" + }, + "node_modules/debug": { + "version": "2.6.9", + "license": "MIT", + "dependencies": { + "ms": "2.0.0" + } + }, + "node_modules/delay": { + "version": "5.0.0", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/delayed-stream": { + "version": 
"1.0.0", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/depd": { + "version": "2.0.0", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/destroy": { + "version": "1.2.0", + "license": "MIT", + "engines": { + "node": ">= 0.8", + "npm": "1.2.8000 || >= 1.4.16" + } + }, + "node_modules/ee-first": { + "version": "1.1.1", + "license": "MIT" + }, + "node_modules/elasticdump": { + "version": "6.94.1", + "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.94.1.tgz", + "integrity": "sha512-VThINQBW1MG7k7oVGndPBXCL6cFSfByu2EZo0gch9l7voyv1FfxyrIp9cZ5Ft9Vwygjh7sXSomnWaQ+qzmkfKA==", + "dependencies": { + "async": "^2.6.4", + "aws-sdk": "2.1122.0", + "aws4": "^1.11.0", + "big.js": "^5.2.2", + "bytes": "^3.1.2", + "delay": "^5.0.0", + "extends-classes": "1.0.5", + "fast-csv": "4.3.6", + "http-status": "^1.5.1", + "ini": "^2.0.0", + "JSONStream": "^1.3.5", + "lodash": "^4.17.21", + "lossless-json": "^1.0.5", + "minimist": "^1.2.6", + "p-queue": "^6.6.2", + "request": "2.88.2", + "requestretry": "^7.1.0", + "s3-stream-upload": "2.0.2", + "s3urls": "^1.5.2", + "semver": "5.7.1", + "socks5-http-client": "^1.0.4", + "socks5-https-client": "^1.2.1" + }, + "bin": { + "elasticdump": "bin/elasticdump", + "multielasticdump": "bin/multielasticdump" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/elasticdump/node_modules/async": { + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", + "dependencies": { + "lodash": "^4.17.14" + } + }, + "node_modules/elasticdump/node_modules/aws-sdk": { + "version": "2.1122.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1122.0.tgz", + "integrity": "sha512-545VawhsCQ7yEx9jZKV0hTTW3FS/waycISWMvnNwqRfpU9o4FQ4DSu3je7ekn5yFKM+91dxJC+IfJgtIV8WaUw==", + "dependencies": { + "buffer": "4.9.2", + "events": 
"1.1.1", + "ieee754": "1.1.13", + "jmespath": "0.16.0", + "querystring": "0.2.0", + "sax": "1.2.1", + "url": "0.10.3", + "uuid": "3.3.2", + "xml2js": "0.4.19" + }, + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/elasticdump/node_modules/uuid": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", + "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/encodeurl": { + "version": "1.0.2", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/escape-html": { + "version": "1.0.3", + "license": "MIT" + }, + "node_modules/etag": { + "version": "1.8.1", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/events": { + "version": "1.1.1", + "license": "MIT", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/express": { + "version": "4.18.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "dependencies": { + "accepts": "~1.3.8", + "array-flatten": "1.1.1", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", + "content-type": "~1.0.4", + "cookie": "0.5.0", + "cookie-signature": "1.0.6", + "debug": "2.6.9", + "depd": "2.0.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "finalhandler": "1.2.0", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "merge-descriptors": "1.0.1", + "methods": "~1.1.2", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "path-to-regexp": "0.1.7", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", + "range-parser": "~1.2.1", + "safe-buffer": "5.2.1", + "send": 
"0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "type-is": "~1.6.18", + "utils-merge": "1.0.1", + "vary": "~1.1.2" + }, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "license": "MIT" + }, + "node_modules/extends-classes": { + "version": "1.0.5", + "license": "MIT", + "dependencies": { + "method-missing": "^1.1.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/extsprintf": { + "version": "1.3.0", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT" + }, + "node_modules/fast-csv": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/fast-csv/-/fast-csv-4.3.6.tgz", + "integrity": "sha512-2RNSpuwwsJGP0frGsOmTb9oUF+VkFSM4SyLTDgwf2ciHWTarN0lQTC+F2f/t5J9QjW+c65VFIAAu85GsvMIusw==", + "dependencies": { + "@fast-csv/format": "4.3.5", + "@fast-csv/parse": "4.3.6" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/fast-csv/node_modules/@fast-csv/format": { "version": "4.3.5", "resolved": "https://registry.npmjs.org/@fast-csv/format/-/format-4.3.5.tgz", "integrity": "sha512-8iRn6QF3I8Ak78lNAa+Gdl5MJJBM5vRHivFtMRUWINdevNo00K7OXxS2PshawLKTejVwieIlPmK5YlLu6w4u8A==", - "requires": { + "dependencies": { "@types/node": "^14.0.1", "lodash.escaperegexp": "^4.1.2", "lodash.isboolean": "^3.0.3", @@ -17,11 +448,11 @@ "lodash.isnil": "^4.0.0" } }, - "@fast-csv/parse": { + "node_modules/fast-csv/node_modules/@fast-csv/parse": { "version": "4.3.6", "resolved": "https://registry.npmjs.org/@fast-csv/parse/-/parse-4.3.6.tgz", "integrity": "sha512-uRsLYksqpbDmWaSmzvJcuApSEe38+6NQZBUsuAyMZKqHxH0g1wcJgsKUvN3WC8tewaqFjBMMGrkHmC+T7k8LvA==", - "requires": { + "dependencies": { "@types/node": "^14.0.1", "lodash.escaperegexp": "^4.1.2", "lodash.groupby": "^4.6.0", @@ -31,62 +462,1018 @@ "lodash.uniq": "^4.5.0" } }, - "@types/node": { - "version": "14.18.21", - "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.21.tgz", - "integrity": 
"sha512-x5W9s+8P4XteaxT/jKF0PSb7XEvo5VmqEWgsMlyeY4ZlLK8I6aH6g5TPPyDlLAep+GYf4kefb7HFyc7PAO3m+Q==" + "node_modules/fast-csv/node_modules/@types/node": { + "version": "14.18.34", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.34.tgz", + "integrity": "sha512-hcU9AIQVHmPnmjRK+XUUYlILlr9pQrsqSrwov/JK1pnf3GTQowVBhx54FbvM0AU/VXGH4i3+vgXS5EguR7fysA==" }, - "JSONStream": { - "version": "1.3.5", - "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz", - "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==", - "requires": { - "jsonparse": "^1.2.0", - "through": ">=2.2.7 <3" + "node_modules/finalhandler": { + "version": "1.2.0", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "on-finished": "2.4.1", + "parseurl": "~1.3.3", + "statuses": "2.0.1", + "unpipe": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/for-each": { + "version": "0.3.3", + "license": "MIT", + "dependencies": { + "is-callable": "^1.1.3" + } + }, + "node_modules/forever-agent": { + "version": "0.6.1", + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/form-data": { + "version": "2.3.3", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.6", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 0.12" + } + }, + "node_modules/forwarded": { + "version": "0.2.0", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/fresh": { + "version": "0.5.2", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/function-bind": { + "version": "1.1.1", + "license": "MIT" + }, + "node_modules/get-intrinsic": {}, + "node_modules/gopd": { + "version": "1.0.1", + "license": "MIT", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + 
"node_modules/har-schema": { + "version": "2.0.0", + "license": "ISC", + "engines": { + "node": ">=4" + } + }, + "node_modules/har-validator": { + "version": "5.1.5", + "license": "MIT", + "dependencies": { + "ajv": "^6.12.3", + "har-schema": "^2.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/has": { + "version": "1.0.3", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.1" + }, + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/http-errors": { + "version": "2.0.0", + "license": "MIT", + "dependencies": { + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/http-signature": { + "version": "1.2.0", + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + }, + "engines": { + "node": ">=0.8", + "npm": ">=1.3.7" + } + }, + "node_modules/http-status": { + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.3.tgz", + "integrity": "sha512-jCClqdnnwigYslmtfb28vPplOgoiZ0siP2Z8C5Ua+3UKbx410v+c+jT+jh1bbI4TvcEySuX0vd/CfFZFbDkJeQ==", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.1.13", + "license": "BSD-3-Clause" + }, + "node_modules/inherits": { + "version": "2.0.4", + "license": "ISC" + }, + "node_modules/ini": { + "version": "2.0.0", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/ip-address": {}, + "node_modules/ipaddr.js": { + "version": "1.9.1", + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/is-callable": {}, + "node_modules/is-typedarray": { + "version": "1.0.0", + "license": "MIT" + }, + "node_modules/isarray": { + "version": 
"1.0.0", + "license": "MIT" + }, + "node_modules/isstream": { + "version": "0.1.2", + "license": "MIT" + }, + "node_modules/jmespath": { + "version": "0.16.0", + "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz", + "integrity": "sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/json-schema": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", + "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" + }, + "node_modules/json-stringify-safe": { + "version": "5.0.1", + "license": "ISC" + }, + "node_modules/jsonparse": { + "version": "1.3.1", + "engines": [ + "node >= 0.2.0" + ], + "license": "MIT" + }, + "node_modules/JSONStream": { + "version": "1.3.5", + "license": "(MIT OR Apache-2.0)", + "dependencies": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + }, + "bin": { + "JSONStream": "bin.js" + }, + "engines": { + "node": "*" + } + }, + "node_modules/jsprim": { + "version": "1.4.2", + "license": "MIT", + "dependencies": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.4.0", + "verror": "1.10.0" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.escaperegexp": { + "version": "4.1.2", + "license": "MIT" + }, + "node_modules/lodash.groupby": { + "version": "4.6.0", + "license": "MIT" + }, + "node_modules/lodash.isboolean": { + "version": "3.0.3", + "license": "MIT" + }, + "node_modules/lodash.isequal": { + "version": "4.5.0", + "license": "MIT" + }, + "node_modules/lodash.isfunction": { + "version": "3.0.9", + "license": "MIT" + }, + 
"node_modules/lodash.isnil": { + "version": "4.0.0", + "license": "MIT" + }, + "node_modules/lodash.isundefined": { + "version": "3.0.1", + "license": "MIT" + }, + "node_modules/lodash.uniq": { + "version": "4.5.0", + "license": "MIT" + }, + "node_modules/lossless-json": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/lossless-json/-/lossless-json-1.0.5.tgz", + "integrity": "sha512-RicKUuLwZVNZ6ZdJHgIZnSeA05p8qWc5NW0uR96mpPIjN9WDLUg9+kj1esQU1GkPn9iLZVKatSQK5gyiaFHgJA==" + }, + "node_modules/media-typer": { + "version": "0.3.0", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/merge-descriptors": { + "version": "1.0.1", + "license": "MIT" + }, + "node_modules/method-missing": { + "version": "1.2.4", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/methods": { + "version": "1.1.2", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime": { + "version": "1.6.0", + "license": "MIT", + "bin": { + "mime": "cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/mime-db": {}, + "node_modules/mime-types": { + "version": "2.1.35", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/ms": { + "version": "2.0.0", + "license": "MIT" + }, + "node_modules/negotiator": {}, + "node_modules/oauth-sign": { + "version": "0.9.0", + "license": "Apache-2.0", + "engines": { + "node": "*" + } + }, + "node_modules/on-finished": { + 
"version": "2.4.1", + "license": "MIT", + "dependencies": { + "ee-first": "1.1.1" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/p-finally": { + "version": "1.0.0", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-queue": { + "version": "6.6.2", + "resolved": "https://registry.npmjs.org/p-queue/-/p-queue-6.6.2.tgz", + "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", + "dependencies": { + "eventemitter3": "^4.0.4", + "p-timeout": "^3.2.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-queue/node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + }, + "node_modules/p-timeout": { + "version": "3.2.0", + "license": "MIT", + "dependencies": { + "p-finally": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/parseurl": { + "version": "1.3.3", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/path-to-regexp": { + "version": "0.1.7", + "license": "MIT" + }, + "node_modules/performance-now": { + "version": "2.1.0", + "license": "MIT" + }, + "node_modules/proxy-addr": { + "version": "2.0.7", + "license": "MIT", + "dependencies": { + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/psl": {}, + "node_modules/punycode": { + "version": "2.1.1", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/qs": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": 
{ + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/qs/node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/qs/node_modules/get-intrinsic": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", + "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/qs/node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/qs/node_modules/object-inspect": { + "version": "1.12.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", + "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/qs/node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": 
"^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/querystring": { + "version": "0.2.0", + "engines": { + "node": ">=0.4.x" + } + }, + "node_modules/range-parser": { + "version": "1.2.1", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/raw-body": { + "version": "2.5.1", + "license": "MIT", + "dependencies": { + "bytes": "3.1.2", + "http-errors": "2.0.0", + "iconv-lite": "0.4.24", + "unpipe": "1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/readable-stream": {}, + "node_modules/request": { + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", + "deprecated": "request has been deprecated, see https://github.com/request/request/issues/3142", + "dependencies": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.3", + "http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.5.0", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/request/node_modules/qs": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", + "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/request/node_modules/uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": 
"sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", + "deprecated": "Please upgrade to version 7 or higher. Older versions may use Math.random() in certain circumstances, which is known to be problematic. See https://v8.dev/blog/math-random for details.", + "bin": { + "uuid": "bin/uuid" + } + }, + "node_modules/requestretry": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/requestretry/-/requestretry-7.1.0.tgz", + "integrity": "sha512-TqVDgp251BW4b8ddQ2ptaj/57Z3LZHLscAUT7v6qs70buqF2/IoOVjYbpjJ6HiW7j5+waqegGI8xKJ/+uzgDmw==", + "dependencies": { + "extend": "^3.0.2", + "lodash": "^4.17.15" + }, + "peerDependencies": { + "request": "2.*.*" + } + }, + "node_modules/s3-stream-upload": { + "version": "2.0.2", + "engines": [ + "node >= 0.10.2" + ], + "license": "MIT", + "dependencies": { + "buffer-queue": "~1.0.0", + "readable-stream": "^2.3.0" + } + }, + "node_modules/s3signed": { + "version": "0.1.0", + "license": "ISC", + "dependencies": { + "aws-sdk": "^2.0.4" + }, + "bin": { + "s3signed": "bin/s3signed.js" + } + }, + "node_modules/s3urls": { + "version": "1.5.2", + "license": "ISC", + "dependencies": { + "minimist": "^1.1.0", + "s3signed": "^0.1.0" + }, + "bin": { + "s3urls": "bin/s3urls.js" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "license": "MIT" + }, + "node_modules/sax": { + "version": "1.2.1", + "license": "ISC" + }, + "node_modules/semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", + "bin": { + 
"semver": "bin/semver" + } + }, + "node_modules/send": { + "version": "0.18.0", + "license": "MIT", + "dependencies": { + "debug": "2.6.9", + "depd": "2.0.0", + "destroy": "1.2.0", + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "etag": "~1.8.1", + "fresh": "0.5.2", + "http-errors": "2.0.0", + "mime": "1.6.0", + "ms": "2.1.3", + "on-finished": "2.4.1", + "range-parser": "~1.2.1", + "statuses": "2.0.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/send/node_modules/ms": { + "version": "2.1.3", + "license": "MIT" + }, + "node_modules/serve-static": { + "version": "1.15.0", + "license": "MIT", + "dependencies": { + "encodeurl": "~1.0.2", + "escape-html": "~1.0.3", + "parseurl": "~1.3.3", + "send": "0.18.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/setprototypeof": { + "version": "1.2.0", + "license": "ISC" + }, + "node_modules/socks5-client": { + "version": "1.2.8", + "license": "MIT", + "dependencies": { + "ip-address": "~6.1.0" + }, + "engines": { + "node": ">= 6.4.0" + } + }, + "node_modules/socks5-http-client": { + "version": "1.0.4", + "license": "MIT", + "dependencies": { + "socks5-client": "~1.2.6" + }, + "engines": { + "node": ">= 6.4.0" + } + }, + "node_modules/socks5-https-client": { + "version": "1.2.1", + "license": "MIT", + "dependencies": { + "socks5-client": "~1.2.3" + }, + "engines": { + "node": ">= 6.4.0" + } + }, + "node_modules/sshpk": {}, + "node_modules/statuses": { + "version": "2.0.1", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/through": { + "version": "2.3.8", + "license": "MIT" + }, + "node_modules/toidentifier": { + "version": "1.0.1", + "license": "MIT", + "engines": { + "node": ">=0.6" + } + }, + "node_modules/tough-cookie": { + "version": "2.5.0", + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.28", + "punycode": "^2.1.1" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "license": 
"Apache-2.0", + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, + "node_modules/type-is": { + "version": "1.6.18", + "license": "MIT", + "dependencies": { + "media-typer": "0.3.0", + "mime-types": "~2.1.24" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/unpipe": { + "version": "1.0.0", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/url": { + "version": "0.10.3", + "license": "MIT", + "dependencies": { + "punycode": "1.3.2", + "querystring": "0.2.0" + } + }, + "node_modules/url/node_modules/punycode": { + "version": "1.3.2", + "license": "MIT" + }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/util/node_modules/available-typed-arrays": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", + "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/get-intrinsic": { + "version": "1.1.3", + "resolved": 
"https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", + "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/has-tostringtag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "dependencies": { + "has-symbols": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/is-arguments": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", + "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "dependencies": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "dependencies": { + "has-tostringtag": "^1.0.0" + }, + "engines": { + 
"node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/is-typed-array": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", + "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/util/node_modules/which-typed-array": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", + "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "dependencies": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0", + "is-typed-array": "^1.1.10" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/utils-merge": { + "version": "1.0.1", + "license": "MIT", + "engines": { + "node": ">= 0.4.0" + } + }, + "node_modules/uuid": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz", + "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==", + "bin": { + "uuid": "dist/bin/uuid" + } + }, + "node_modules/vary": { + "version": "1.1.2", + "license": "MIT", + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/verror": { + "version": "1.10.0", + "engines": [ + "node >=0.6.0" + ], + "license": "MIT", + "dependencies": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "node_modules/verror/node_modules/core-util-is": { + 
"version": "1.0.2", + "license": "MIT" + }, + "node_modules/xml2js": { + "version": "0.4.19", + "license": "MIT", + "dependencies": { + "sax": ">=0.6.0", + "xmlbuilder": "~9.0.1" } }, - "accepts": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.7.tgz", - "integrity": "sha512-Il80Qs2WjYlJIBNzNkK6KYqlVMTbZLXgHx2oT0pU/fjRHyEp+PEfEPY0R3WCwAGVOtauxh1hOxNgIf5bv7dQpA==", - "requires": { - "mime-types": "~2.1.24", - "negotiator": "0.6.2" + "node_modules/xmlbuilder": { + "version": "9.0.7", + "license": "MIT", + "engines": { + "node": ">=4.0" } - }, - "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + } + }, + "dependencies": { + "accepts": { + "version": "1.3.8", "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" + "mime-types": "~2.1.34", + "negotiator": "0.6.3" } }, + "ajv": {}, "ansi-regex": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz", - "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==" + "version": "6.0.1" }, "array-flatten": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", - "integrity": "sha1-ml9pkFGx5wczKPKgCJaLZOopVdI=" - }, - "asn1": { - "version": "0.2.6", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", - "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", - "requires": { - "safer-buffer": "~2.1.0" - } + "version": "1.1.1" }, "assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": 
"sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==" + "version": "1.0.0" }, "async": { "version": "3.2.4", @@ -94,14 +1481,12 @@ "integrity": "sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ==" }, "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + "version": "0.4.0" }, "aws-sdk": { - "version": "2.1154.0", - "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1154.0.tgz", - "integrity": "sha512-SIxLcWGsnW9Sl2P+a+uoqebBsfjeAZZOQokzgDj3VoESnFzsjI+2REi9CdvvSvwlfFUP7sFr6A0khrYNDJLebQ==", + "version": "2.1273.0", + "resolved": "https://registry.npmjs.org/aws-sdk/-/aws-sdk-2.1273.0.tgz", + "integrity": "sha512-QF37fm1DfUxjw+IJtDMTDBckVwAOf8EHQjs4NxJp5TtRkeqtWkxNzq/ViI8kAS+0n8JZaom8Oenmy8ufGfLMAQ==", "requires": { "buffer": "4.9.2", "events": "1.1.1", @@ -110,78 +1495,47 @@ "querystring": "0.2.0", "sax": "1.2.1", "url": "0.10.3", + "util": "^0.12.4", "uuid": "8.0.0", "xml2js": "0.4.19" - }, - "dependencies": { - "uuid": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz", - "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==" - } } }, "aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==" + "version": "0.7.0" }, "aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" + "version": "1.11.0" }, "balanced-match": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + "version": "1.0.2" }, "base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==" - }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", - "requires": { - "tweetnacl": "^0.14.3" - } + "version": "1.5.1" }, "big.js": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz", - "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==" + "version": "5.2.2" }, "body-parser": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.19.0.tgz", - "integrity": "sha512-dhEPs72UPbDnAQJ9ZKMNTP6ptJaionhP5cBb541nXPlW60Jepo9RV/a4fX4XWW9CuFNK22krhrj1+rgzifNCsw==", + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", + "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", "requires": { - "bytes": "3.1.0", + "bytes": "3.1.2", "content-type": "~1.0.4", "debug": "2.6.9", - "depd": "~1.1.2", - "http-errors": "1.7.2", + "depd": "2.0.0", + "destroy": "1.2.0", + "http-errors": "2.0.0", "iconv-lite": "0.4.24", - "on-finished": "~2.3.0", - "qs": "6.7.0", - "raw-body": "2.4.0", - "type-is": "~1.6.17" - }, - "dependencies": { - "qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" - 
} + "on-finished": "2.4.1", + "qs": "6.11.0", + "raw-body": "2.5.1", + "type-is": "~1.6.18", + "unpipe": "1.0.0" } }, "brace-expansion": { "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "requires": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -198,124 +1552,64 @@ } }, "buffer-queue": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/buffer-queue/-/buffer-queue-1.0.0.tgz", - "integrity": "sha512-HNAysvwrmORbm5w5rB6yCz2Sab+ATCW6RSAOVWJmaRnPviPfuNO8+f3R0MyCJMUhL8sMx88LcawtIcfjHERhVA==" + "version": "1.0.0" }, "bytes": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.0.tgz", - "integrity": "sha512-zauLjrfCG+xvoyaqLoV8bLVXXNGC4JqlxFCutSDWA6fJrTo2ZuvLYTqZ7aHBLZSMOopbzwv8f+wZcVzfVTI2Dg==" + "version": "3.1.2" }, "caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==" + "version": "0.12.0" }, "combined-stream": { "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", "requires": { "delayed-stream": "~1.0.0" } }, "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + "version": "0.0.1" }, "content-disposition": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.3.tgz", - "integrity": 
"sha512-ExO0774ikEObIAEV9kDo50o+79VCUdEB6n6lzKgGwupcVeRlhrj3qGAfwq8G6uBJjkqLrhT0qEYFcWng8z1z0g==", + "version": "0.5.4", "requires": { - "safe-buffer": "5.1.2" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } + "safe-buffer": "5.2.1" } }, "content-type": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz", - "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==" + "version": "1.0.4" }, "cookie": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.4.0.tgz", - "integrity": "sha512-+Hp8fLp57wnUSt0tY0tHEXh4voZRDnoIrZPqlo3DPiI4y9lwg/jqx+1Om94/W6ZaPDOUbnjOt/99w66zk+l1Xg==" + "version": "0.5.0" }, "cookie-signature": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha1-4wOogrNCzD7oylE6eZmXNNqzriw=" - }, - "core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==" - }, - "dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", - "requires": { - "assert-plus": "^1.0.0" - } + "version": "1.0.6" }, "debug": { "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", "requires": { "ms": "2.0.0" } }, "delay": { - "version": "5.0.0", - "resolved": 
"https://registry.npmjs.org/delay/-/delay-5.0.0.tgz", - "integrity": "sha512-ReEBKkIfe4ya47wlPYf/gu5ib6yUG0/Aez0JQZQz94kiWtRQvZIQbTiehsnwHvLSWJnQdhVeqYue7Id1dKr0qw==" + "version": "5.0.0" }, "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==" + "version": "1.0.0" }, "depd": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz", - "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=" + "version": "2.0.0" }, "destroy": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz", - "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=" - }, - "ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } + "version": "1.2.0" }, "ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" + "version": "1.1.1" }, "elasticdump": { - "version": "6.84.1", - "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.84.1.tgz", - "integrity": "sha512-qgHJeGGNMJFwGMpidCOCKZsbq6bUth2cvns1QdrJnCIoojv5x0J4C6Xm5zh8sZCYr7y5nrwfgMUkrbMNLHdGwQ==", + "version": "6.94.1", + "resolved": "https://registry.npmjs.org/elasticdump/-/elasticdump-6.94.1.tgz", + "integrity": "sha512-VThINQBW1MG7k7oVGndPBXCL6cFSfByu2EZo0gch9l7voyv1FfxyrIp9cZ5Ft9Vwygjh7sXSomnWaQ+qzmkfKA==", "requires": { - "JSONStream": "^1.3.5", "async": "^2.6.4", "aws-sdk": "2.1122.0", "aws4": "^1.11.0", @@ -326,6 +1620,7 @@ "fast-csv": "4.3.6", "http-status": "^1.5.1", "ini": "^2.0.0", + "JSONStream": "^1.3.5", "lodash": "^4.17.21", "lossless-json": 
"^1.0.5", "minimist": "^1.2.6", @@ -363,104 +1658,74 @@ "xml2js": "0.4.19" } }, - "bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==" + "uuid": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", + "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" } } }, "encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha1-rT/0yG7C0CkyL1oCw6mmBslbP1k=" + "version": "1.0.2" }, "escape-html": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=" + "version": "1.0.3" }, "etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=" - }, - "eventemitter3": { - "version": "4.0.7", - "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", - "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + "version": "1.8.1" }, "events": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/events/-/events-1.1.1.tgz", - "integrity": "sha512-kEcvvCBByWXGnZy6JUlgAp2gBIUjfCAV6P6TgT1/aaQKcmuAEC4OZTV1I4EWQLz2gxZw76atuVyvHhTxvi0Flw==" + "version": "1.1.1" }, "express": { - "version": "4.17.1", - "resolved": "https://registry.npmjs.org/express/-/express-4.17.1.tgz", - "integrity": "sha512-mHJ9O79RqluphRrcw2X/GTh3k9tVv8YcoyY4Kkh4WDMUYKRZUq0h1o0w2rrrxBqM7VoeUVqgb27xlEMXTnYt4g==", + "version": "4.18.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", + "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", 
"requires": { - "accepts": "~1.3.7", + "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.19.0", - "content-disposition": "0.5.3", + "body-parser": "1.20.1", + "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.4.0", + "cookie": "0.5.0", "cookie-signature": "1.0.6", "debug": "2.6.9", - "depd": "~1.1.2", + "depd": "2.0.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "~1.1.2", + "finalhandler": "1.2.0", "fresh": "0.5.2", + "http-errors": "2.0.0", "merge-descriptors": "1.0.1", "methods": "~1.1.2", - "on-finished": "~2.3.0", + "on-finished": "2.4.1", "parseurl": "~1.3.3", "path-to-regexp": "0.1.7", - "proxy-addr": "~2.0.5", - "qs": "6.7.0", + "proxy-addr": "~2.0.7", + "qs": "6.11.0", "range-parser": "~1.2.1", - "safe-buffer": "5.1.2", - "send": "0.17.1", - "serve-static": "1.14.1", - "setprototypeof": "1.1.1", - "statuses": "~1.5.0", + "safe-buffer": "5.2.1", + "send": "0.18.0", + "serve-static": "1.15.0", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", "type-is": "~1.6.18", "utils-merge": "1.0.1", "vary": "~1.1.2" - }, - "dependencies": { - "qs": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.7.0.tgz", - "integrity": "sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==" - }, - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } } }, "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + "version": "3.0.2" }, "extends-classes": { "version": "1.0.5", - "resolved": "https://registry.npmjs.org/extends-classes/-/extends-classes-1.0.5.tgz", - "integrity": 
"sha512-ccyBHFN+wFM0dz0hvuQntSH9KST9951ua1hr3yxeFfu+h3H/eHw1RavE8XAEVi9K8dh534Mk3xA+pjk7VHkUcQ==", "requires": { "method-missing": "^1.1.2" } }, "extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==" + "version": "1.3.0" }, "fast-csv": { "version": "4.3.6", @@ -469,41 +1734,65 @@ "requires": { "@fast-csv/format": "4.3.5", "@fast-csv/parse": "4.3.6" + }, + "dependencies": { + "@fast-csv/format": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/@fast-csv/format/-/format-4.3.5.tgz", + "integrity": "sha512-8iRn6QF3I8Ak78lNAa+Gdl5MJJBM5vRHivFtMRUWINdevNo00K7OXxS2PshawLKTejVwieIlPmK5YlLu6w4u8A==", + "requires": { + "@types/node": "^14.0.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.isboolean": "^3.0.3", + "lodash.isequal": "^4.5.0", + "lodash.isfunction": "^3.0.9", + "lodash.isnil": "^4.0.0" + } + }, + "@fast-csv/parse": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/@fast-csv/parse/-/parse-4.3.6.tgz", + "integrity": "sha512-uRsLYksqpbDmWaSmzvJcuApSEe38+6NQZBUsuAyMZKqHxH0g1wcJgsKUvN3WC8tewaqFjBMMGrkHmC+T7k8LvA==", + "requires": { + "@types/node": "^14.0.1", + "lodash.escaperegexp": "^4.1.2", + "lodash.groupby": "^4.6.0", + "lodash.isfunction": "^3.0.9", + "lodash.isnil": "^4.0.0", + "lodash.isundefined": "^3.0.1", + "lodash.uniq": "^4.5.0" + } + }, + "@types/node": { + "version": "14.18.34", + "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.34.tgz", + "integrity": "sha512-hcU9AIQVHmPnmjRK+XUUYlILlr9pQrsqSrwov/JK1pnf3GTQowVBhx54FbvM0AU/VXGH4i3+vgXS5EguR7fysA==" + } } }, - "fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" - }, - 
"fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" - }, "finalhandler": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.1.2.tgz", - "integrity": "sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA==", + "version": "1.2.0", "requires": { "debug": "2.6.9", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", - "on-finished": "~2.3.0", + "on-finished": "2.4.1", "parseurl": "~1.3.3", - "statuses": "~1.5.0", + "statuses": "2.0.1", "unpipe": "~1.0.0" } }, + "for-each": { + "version": "0.3.3", + "requires": { + "is-callable": "^1.1.3" + } + }, "forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==" + "version": "0.6.1" }, "form-data": { "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", "requires": { "asynckit": "^0.4.0", "combined-stream": "^1.0.6", @@ -511,60 +1800,49 @@ } }, "forwarded": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.1.2.tgz", - "integrity": "sha1-mMI9qxF1ZXuMBXPozszZGw/xjIQ=" + "version": "0.2.0" }, "fresh": { - "version": "0.5.2", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz", - "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=" + "version": "0.5.2" }, - "getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": 
"sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "function-bind": { + "version": "1.1.1" + }, + "get-intrinsic": {}, + "gopd": { + "version": "1.0.1", "requires": { - "assert-plus": "^1.0.0" + "get-intrinsic": "^1.1.3" } }, "har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==" + "version": "2.0.0" }, "har-validator": { "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", "requires": { "ajv": "^6.12.3", "har-schema": "^2.0.0" } }, + "has": { + "version": "1.0.3", + "requires": { + "function-bind": "^1.1.1" + } + }, "http-errors": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.2.tgz", - "integrity": "sha512-uUQBt3H/cSIVfch6i1EuPNy/YsRSOUBXTVfZ+yR7Zjez3qjBz6i9+i4zjNaoqcoFVI4lQJ5plg63TvGfRSDCRg==", + "version": "2.0.0", "requires": { - "depd": "~1.1.2", - "inherits": "2.0.3", - "setprototypeof": "1.1.1", - "statuses": ">= 1.5.0 < 2", - "toidentifier": "1.0.0" - }, - "dependencies": { - "inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" - } + "depd": "2.0.0", + "inherits": "2.0.4", + "setprototypeof": "1.2.0", + "statuses": "2.0.1", + "toidentifier": "1.0.1" } }, "http-signature": { "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==", "requires": { "assert-plus": "^1.0.0", "jsprim": "^1.2.2", @@ -572,104 +1850,64 @@ } }, "http-status": { - "version": "1.5.2", - 
"resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.2.tgz", - "integrity": "sha512-HzxX+/hV/8US1Gq4V6R6PgUmJ5Pt/DGATs4QhdEOpG8LrdS9/3UG2nnOvkqUpRks04yjVtV5p/NODjO+wvf6vg==" + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.3.tgz", + "integrity": "sha512-jCClqdnnwigYslmtfb28vPplOgoiZ0siP2Z8C5Ua+3UKbx410v+c+jT+jh1bbI4TvcEySuX0vd/CfFZFbDkJeQ==" }, "iconv-lite": { "version": "0.4.24", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", - "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "requires": { "safer-buffer": ">= 2.1.2 < 3" } }, "ieee754": { - "version": "1.1.13", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.1.13.tgz", - "integrity": "sha512-4vf7I2LYV/HaWerSo3XmlMkp5eZ83i+/CDluXi/IGTs/O1sejBNhTtnxzmRZfvOUqj7lZjqHkeTvpgSFDlWZTg==" + "version": "1.1.13" }, "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + "version": "2.0.4" }, "ini": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz", - "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==" - }, - "ip-address": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz", - "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==", - "requires": { - "jsbn": "1.1.0", - "lodash": "^4.17.15", - "sprintf-js": "1.1.2" - }, - "dependencies": { - "jsbn": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", - "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==" - } - } + "version": "2.0.0" }, + "ip-address": 
{}, "ipaddr.js": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.0.tgz", - "integrity": "sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA==" + "version": "1.9.1" }, + "is-callable": {}, "is-typedarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA==" + "version": "1.0.0" }, "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + "version": "1.0.0" }, "isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==" + "version": "0.1.2" }, "jmespath": { "version": "0.16.0", "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz", "integrity": "sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==" }, - "jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" - }, "json-schema": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" - }, "json-stringify-safe": { - "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==" + "version": "5.0.1" }, "jsonparse": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz", - "integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==" + "version": "1.3.1" + }, + "JSONStream": { + "version": "1.3.5", + "requires": { + "jsonparse": "^1.2.0", + "through": ">=2.2.7 <3" + } }, "jsprim": { "version": "1.4.2", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.2.tgz", - "integrity": "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw==", "requires": { "assert-plus": "1.0.0", "extsprintf": "1.3.0", @@ -683,44 +1921,28 @@ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, "lodash.escaperegexp": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/lodash.escaperegexp/-/lodash.escaperegexp-4.1.2.tgz", - "integrity": "sha512-TM9YBvyC84ZxE3rgfefxUWiQKLilstD6k7PTGt6wfbtXF8ixIJLOL3VYyV/z+ZiPLsVxAsKAFVwWlWeb2Y8Yyw==" + "version": "4.1.2" }, "lodash.groupby": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/lodash.groupby/-/lodash.groupby-4.6.0.tgz", - "integrity": "sha512-5dcWxm23+VAoz+awKmBaiBvzox8+RqMgFhi7UvX9DHZr2HdxHXM/Wrf8cfKpsW37RNrvtPn6hSwNqurSILbmJw==" + "version": "4.6.0" }, "lodash.isboolean": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/lodash.isboolean/-/lodash.isboolean-3.0.3.tgz", - "integrity": "sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==" + "version": "3.0.3" }, "lodash.isequal": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.isequal/-/lodash.isequal-4.5.0.tgz", - "integrity": 
"sha512-pDo3lu8Jhfjqls6GkMgpahsF9kCyayhgykjyLMNFTKWrpVdAQtYyB4muAMWozBB4ig/dtWAmsMxLEI8wuz+DYQ==" + "version": "4.5.0" }, "lodash.isfunction": { - "version": "3.0.9", - "resolved": "https://registry.npmjs.org/lodash.isfunction/-/lodash.isfunction-3.0.9.tgz", - "integrity": "sha512-AirXNj15uRIMMPihnkInB4i3NHeb4iBtNg9WRWuK2o31S+ePwwNmDPaTL3o7dTJ+VXNZim7rFs4rxN4YU1oUJw==" + "version": "3.0.9" }, "lodash.isnil": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/lodash.isnil/-/lodash.isnil-4.0.0.tgz", - "integrity": "sha512-up2Mzq3545mwVnMhTDMdfoG1OurpA/s5t88JmQX809eH3C8491iu2sfKhTfhQtKY78oPNhiaHJUpT/dUDAAtng==" + "version": "4.0.0" }, "lodash.isundefined": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/lodash.isundefined/-/lodash.isundefined-3.0.1.tgz", - "integrity": "sha512-MXB1is3s899/cD8jheYYE2V9qTHwKvt+npCwpD+1Sxm3Q3cECXCiYHjeHWXNwr6Q0SOBPrYUDxendrO6goVTEA==" + "version": "3.0.1" }, "lodash.uniq": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz", - "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ==" + "version": "4.5.0" }, "lossless-json": { "version": "1.0.5", @@ -728,83 +1950,53 @@ "integrity": "sha512-RicKUuLwZVNZ6ZdJHgIZnSeA05p8qWc5NW0uR96mpPIjN9WDLUg9+kj1esQU1GkPn9iLZVKatSQK5gyiaFHgJA==" }, "media-typer": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", - "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=" + "version": "0.3.0" }, "merge-descriptors": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.1.tgz", - "integrity": "sha1-sAqqVW3YtEVoFQ7J0blT8/kMu2E=" + "version": "1.0.1" }, "method-missing": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/method-missing/-/method-missing-1.2.4.tgz", - "integrity": 
"sha512-tmj4CKZJVQd/ZuN9hnYD8HBAs/3RdDdqUeJG9RbVYlEZLuPYK4EW+EekMqLsCV4w1HastX+Pk2Ov87OQmeo01A==" + "version": "1.2.4" }, "methods": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz", - "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=" + "version": "1.1.2" }, "mime": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz", - "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==" - }, - "mime-db": { - "version": "1.40.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.40.0.tgz", - "integrity": "sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA==" + "version": "1.6.0" }, + "mime-db": {}, "mime-types": { - "version": "2.1.24", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.24.tgz", - "integrity": "sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==", + "version": "2.1.35", "requires": { - "mime-db": "1.40.0" + "mime-db": "1.52.0" } }, "minimatch": { "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "requires": { "brace-expansion": "^1.1.7" } }, "minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.7.tgz", + "integrity": "sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==" }, "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" - }, - "negotiator": { - "version": "0.6.2", - 
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.2.tgz", - "integrity": "sha512-hZXc7K2e+PgeI1eDBe/10Ard4ekbfrrqG8Ep+8Jmf4JID2bNg7NvCPOZN+kfF574pFQI7mum2AUqDidoKqcTOw==" + "version": "2.0.0" }, + "negotiator": {}, "oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" + "version": "0.9.0" }, "on-finished": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz", - "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=", + "version": "2.4.1", "requires": { "ee-first": "1.1.1" } }, "p-finally": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", - "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==" + "version": "1.0.0" }, "p-queue": { "version": "6.6.2", @@ -813,102 +2005,106 @@ "requires": { "eventemitter3": "^4.0.4", "p-timeout": "^3.2.0" + }, + "dependencies": { + "eventemitter3": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==" + } } }, "p-timeout": { "version": "3.2.0", - "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-3.2.0.tgz", - "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", "requires": { "p-finally": "^1.0.0" } }, "parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==" + "version": "1.3.3" }, "path-to-regexp": { - "version": "0.1.7", - "resolved": 
"https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.7.tgz", - "integrity": "sha1-32BBeABfUi8V60SQ5yR6G/qmf4w=" + "version": "0.1.7" }, "performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==" - }, - "process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + "version": "2.1.0" }, "proxy-addr": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.5.tgz", - "integrity": "sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==", + "version": "2.0.7", "requires": { - "forwarded": "~0.1.2", - "ipaddr.js": "1.9.0" + "forwarded": "0.2.0", + "ipaddr.js": "1.9.1" } }, - "psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" - }, + "psl": {}, "punycode": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha512-RofWgt/7fL5wP1Y7fxE7/EmTLzQVnB0ycyibJ0OOHIlJqTNzglYFxVwETOcIoJqJmpDXJ9xImDv+Fq34F/d4Dw==" + "version": "2.1.1" }, "qs": { - "version": "6.5.3", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", - "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==" + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz", + "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", + "requires": { + "side-channel": "^1.0.4" + }, + "dependencies": { + 
"call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + } + }, + "get-intrinsic": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", + "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + } + }, + "has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" + }, + "object-inspect": { + "version": "1.12.2", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", + "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==" + }, + "side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "requires": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + } + } + } }, "querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha512-X/xY82scca2tau62i9mDyU9K+I+djTMUsvwf7xnUX5GLvVzgJybOJf4Y6o9Zx3oJK/LSXg5tTZBjwzqVPaPO2g==" + "version": "0.2.0" }, "range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==" + "version": 
"1.2.1" }, "raw-body": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.4.0.tgz", - "integrity": "sha512-4Oz8DUIwdvoa5qMJelxipzi/iJIi40O5cGV1wNYp5hvZP8ZN0T+jiNkL0QepXs+EsQ9XJ8ipEDoiH70ySUJP3Q==", + "version": "2.5.1", "requires": { - "bytes": "3.1.0", - "http-errors": "1.7.2", + "bytes": "3.1.2", + "http-errors": "2.0.0", "iconv-lite": "0.4.24", "unpipe": "1.0.0" } }, - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } - }, + "readable-stream": {}, "request": { "version": "2.88.2", "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", @@ -934,6 +2130,18 @@ "tough-cookie": "~2.5.0", "tunnel-agent": "^0.6.0", "uuid": "^3.3.2" + }, + "dependencies": { + "qs": { + "version": "6.5.3", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.3.tgz", + "integrity": "sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==" + }, + "uuid": { + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", + "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" + } } }, "requestretry": { @@ -947,8 +2155,6 @@ }, "s3-stream-upload": { "version": "2.0.2", - "resolved": "https://registry.npmjs.org/s3-stream-upload/-/s3-stream-upload-2.0.2.tgz", - 
"integrity": "sha512-hSfGZ4InIUMH29niWCAkcDvmOGwADSy7j2Ktm6+nKI+ub6nPoLOboo1D+Q3mEIutTHu0J4+Sv92J0GOk5hAonQ==", "requires": { "buffer-queue": "~1.0.0", "readable-stream": "^2.3.0" @@ -956,35 +2162,25 @@ }, "s3signed": { "version": "0.1.0", - "resolved": "https://registry.npmjs.org/s3signed/-/s3signed-0.1.0.tgz", - "integrity": "sha512-08Jc0+GAaFjXgvl8qQytu6+wVBfcUUyCJDocj5kBUeq9YA+6mAM/6psDNxrg4PVkkLBvAK75mnjlaGckfOtDKA==", "requires": { "aws-sdk": "^2.0.4" } }, "s3urls": { "version": "1.5.2", - "resolved": "https://registry.npmjs.org/s3urls/-/s3urls-1.5.2.tgz", - "integrity": "sha512-3f4kprxnwAqoiVdR/XFoc997YEt0b6oY1VKrhl+kuWnHaUQ2cVe73TcQaww8geX5FKPuGBHl90xv70q7SlbBew==", "requires": { "minimist": "^1.1.0", "s3signed": "^0.1.0" } }, "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" + "version": "5.2.1" }, "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + "version": "2.1.2" }, "sax": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.2.1.tgz", - "integrity": "sha512-8I2a3LovHTOpm7NV5yOyO8IHqgVsfK4+UuySrXU8YXkSRX7k6hCV9b3HrkKCr3nMpgj+0bmocaJJWpvp1oc7ZA==" + "version": "1.2.1" }, "semver": { "version": "5.7.1", @@ -992,233 +2188,229 @@ "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" }, "send": { - "version": "0.17.1", - "resolved": "https://registry.npmjs.org/send/-/send-0.17.1.tgz", - "integrity": "sha512-BsVKsiGcQMFwT8UxypobUKyv7irCNRHk1T0G680vk88yf6LBByGcZJOTJCrTP2xVN6yI+XjPJcNuE3V4fT9sAg==", + "version": "0.18.0", "requires": { "debug": "2.6.9", - "depd": "~1.1.2", - "destroy": "~1.0.4", + "depd": "2.0.0", + 
"destroy": "1.2.0", "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "etag": "~1.8.1", "fresh": "0.5.2", - "http-errors": "~1.7.2", + "http-errors": "2.0.0", "mime": "1.6.0", - "ms": "2.1.1", - "on-finished": "~2.3.0", + "ms": "2.1.3", + "on-finished": "2.4.1", "range-parser": "~1.2.1", - "statuses": "~1.5.0" + "statuses": "2.0.1" }, "dependencies": { "ms": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", - "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" + "version": "2.1.3" } } }, "serve-static": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.14.1.tgz", - "integrity": "sha512-JMrvUwE54emCYWlTI+hGrGv5I8dEwmco/00EvkzIIsR7MqrHonbD9pO2MOfFnpFntl7ecpZs+3mW+XbQZu9QCg==", + "version": "1.15.0", "requires": { "encodeurl": "~1.0.2", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.17.1" + "send": "0.18.0" } }, "setprototypeof": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.1.tgz", - "integrity": "sha512-JvdAWfbXeIGaZ9cILp38HntZSFSo3mWg6xGcJJsd+d4aRMOqauag1C63dJfDw7OaMYwEbHMOxEZ1lqVRYP2OAw==" + "version": "1.2.0" }, "socks5-client": { "version": "1.2.8", - "resolved": "https://registry.npmjs.org/socks5-client/-/socks5-client-1.2.8.tgz", - "integrity": "sha512-js8WqQ/JjZS3IQwUxRwSThvXzcRIHE8sde8nE5q7nqxiFGb8EoHmNJ9SF2lXqn3ux6pUV3+InH7ng7mANK6XfA==", "requires": { "ip-address": "~6.1.0" } }, "socks5-http-client": { "version": "1.0.4", - "resolved": "https://registry.npmjs.org/socks5-http-client/-/socks5-http-client-1.0.4.tgz", - "integrity": "sha512-K16meYkltPtps6yDOqK9Mwlfz+pdD2kQQQ/TCO/gu2AImUmfO6nF2uXX1YWrPs4NCfClQNih19wqLXmuUcZCrA==", "requires": { "socks5-client": "~1.2.6" } }, "socks5-https-client": { "version": "1.2.1", - "resolved": "https://registry.npmjs.org/socks5-https-client/-/socks5-https-client-1.2.1.tgz", - "integrity": 
"sha512-FbZ/X/2Xq3DAMhuRA4bnN0jy1QxaPTVPLFvyv6CEj0QDKSTdWp9yRxo1JhqXmWKhPQeJyUMajHJB2UjT43pFcw==", "requires": { "socks5-client": "~1.2.3" } }, - "sprintf-js": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", - "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==" - }, - "sshpk": { - "version": "1.17.0", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.17.0.tgz", - "integrity": "sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==", - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, + "sshpk": {}, "statuses": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz", - "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=" - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "requires": { - "safe-buffer": "~5.1.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" - } - } + "version": "2.0.1" }, "through": { - "version": "2.3.8", - "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz", - "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==" + "version": "2.3.8" }, "toidentifier": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz", - "integrity": 
"sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==" + "version": "1.0.1" }, "tough-cookie": { "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", "requires": { "psl": "^1.1.28", "punycode": "^2.1.1" - }, - "dependencies": { - "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" - } } }, "tunnel-agent": { "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", "requires": { "safe-buffer": "^5.0.1" } }, - "tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" - }, "type-is": { "version": "1.6.18", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", - "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "requires": { "media-typer": "0.3.0", "mime-types": "~2.1.24" } }, "unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=" + "version": "1.0.0" }, - "uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "url": { + "version": "0.10.3", "requires": { - "punycode": "^2.1.0" + "punycode": "1.3.2", + "querystring": "0.2.0" }, 
"dependencies": { "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" + "version": "1.3.2" } } }, - "url": { - "version": "0.10.3", - "resolved": "https://registry.npmjs.org/url/-/url-0.10.3.tgz", - "integrity": "sha512-hzSUW2q06EqL1gKM/a+obYHLIO6ct2hwPuviqTTOcfFVc61UbfJ2Q32+uGL/HCPxKqrdGB5QUwIe7UqlDgwsOQ==", + "util": { + "version": "0.12.5", + "resolved": "https://registry.npmjs.org/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", "requires": { - "punycode": "1.3.2", - "querystring": "0.2.0" + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + }, + "dependencies": { + "available-typed-arrays": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", + "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==" + }, + "call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "requires": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + } + }, + "get-intrinsic": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", + "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", + "requires": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-symbols": "^1.0.3" + } + }, + "has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": 
"sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" + }, + "has-tostringtag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", + "integrity": "sha512-kFjcSNhnlGV1kyoGk7OXKSawH5JOb/LzUc5w9B02hOTO0dfFRjbHQKvg1d6cf3HbeUmtU9VbbV3qzZ2Teh97WQ==", + "requires": { + "has-symbols": "^1.0.2" + } + }, + "is-arguments": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-arguments/-/is-arguments-1.1.1.tgz", + "integrity": "sha512-8Q7EARjzEnKpt/PCD7e1cgUS0a6X8u5tdSiMqXhojOdoV9TsMsiO+9VLC5vAmO8N7/GmXn7yjR8qnA6bVAEzfA==", + "requires": { + "call-bind": "^1.0.2", + "has-tostringtag": "^1.0.0" + } + }, + "is-generator-function": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.10.tgz", + "integrity": "sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A==", + "requires": { + "has-tostringtag": "^1.0.0" + } + }, + "is-typed-array": { + "version": "1.1.10", + "resolved": "https://registry.npmjs.org/is-typed-array/-/is-typed-array-1.1.10.tgz", + "integrity": "sha512-PJqgEHiWZvMpaFZ3uTc8kHPM4+4ADTlDniuQL7cU/UDA0Ql7F70yGfHph3cLNe+c9toaigv+DFzTJKhc2CtO6A==", + "requires": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0" + } + }, + "which-typed-array": { + "version": "1.1.9", + "resolved": "https://registry.npmjs.org/which-typed-array/-/which-typed-array-1.1.9.tgz", + "integrity": "sha512-w9c4xkx6mPidwp7180ckYWfMmvxpjlZuIudNtDf4N/tTAUB8VJbX25qZoAsrtGuYNnGw3pa0AXgbGKRB8/EceA==", + "requires": { + "available-typed-arrays": "^1.0.5", + "call-bind": "^1.0.2", + "for-each": "^0.3.3", + "gopd": "^1.0.1", + "has-tostringtag": "^1.0.0", + "is-typed-array": "^1.1.10" + } + } } }, - "util-deprecate": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" - }, "utils-merge": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", - "integrity": "sha1-n5VxD1CiZ5R7LMwSR0HBAoQn5xM=" + "version": "1.0.1" }, "uuid": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", - "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.0.0.tgz", + "integrity": "sha512-jOXGuXZAWdsTH7eZLtyXMqUb9EcWMGZNbL9YcGBJl4MH4nrxHmZJhEHvyLFrkxo+28uLb/NYRcStH48fnD0Vzw==" }, "vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=" + "version": "1.1.2" }, "verror": { "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha512-ZZKSmDAEFOijERBLkmYfJ+vmk3w+7hOLYDNkRCuRuMJGEmqYNCNLyBBFwWKVMhfwaEF3WOd0Zlw86U/WC/+nYw==", "requires": { "assert-plus": "^1.0.0", "core-util-is": "1.0.2", "extsprintf": "^1.2.0" + }, + "dependencies": { + "core-util-is": { + "version": "1.0.2" + } } }, "xml2js": { "version": "0.4.19", - "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", - "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", "requires": { "sax": ">=0.6.0", "xmlbuilder": "~9.0.1" } }, "xmlbuilder": { - "version": "9.0.7", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", - "integrity": "sha512-7YXTQc3P2l9+0rjaUbLwMKRhtmwg1M1eDf6nag7urC7pIPYLD9W/jmzQ4ptRSUbodw5S0jfoGTflLemQibSpeQ==" + "version": "9.0.7" } } } From 76953402be36daad5c69309d104648ad5118d3ed Mon Sep 17 00:00:00 2001 From: EliseCastle23 
<109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 13 Dec 2022 12:47:30 -0700 Subject: [PATCH 031/362] adding node affinity (#2102) * adding the node affinity to reflect the changes made in feat/spot-instances * updating these files to match changes made in feat/spot-instances as the PR is still failing * changing from ONDEMAND to SPOT * forgot to change for sheepdog * changing back to "ONDEMAND" * changing sheepdog back to spot * trying aws helper set to spot --- gen3/lib/testData/default/expectedFenceResult.yaml | 11 ++++++++++- gen3/lib/testData/default/expectedSheepdogResult.yaml | 11 ++++++++++- .../test1.manifest.g3k/expectedFenceResult.yaml | 11 ++++++++++- .../test1.manifest.g3k/expectedSheepdogResult.yaml | 11 ++++++++++- kube/services/jobs/envtest-job.yaml | 2 +- 5 files changed, 41 insertions(+), 5 deletions(-) diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml index 7bc373ad0..62dc751d4 100644 --- a/gen3/lib/testData/default/expectedFenceResult.yaml +++ b/gen3/lib/testData/default/expectedFenceResult.yaml @@ -32,7 +32,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -41,6 +41,15 @@ spec: values: - fence topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: # ----------------------------------------------------------------------------- diff --git a/gen3/lib/testData/default/expectedSheepdogResult.yaml b/gen3/lib/testData/default/expectedSheepdogResult.yaml index ea8f81dbd..f40a698f6 100644 --- a/gen3/lib/testData/default/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/default/expectedSheepdogResult.yaml @@ -27,7 +27,7 @@ spec: 
affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -36,6 +36,15 @@ spec: values: - sheepdog topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: - name: config-volume diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index 66fb41ca4..d4196c070 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -35,7 +35,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -44,6 +44,15 @@ spec: values: - fence topologyKey: "kubernetes.io/hostname" + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND automountServiceAccountToken: false volumes: # ----------------------------------------------------------------------------- diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml index 5d0025950..5ebdc1bb1 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml @@ -31,7 +31,7 @@ spec: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 + - weight: 25 podAffinityTerm: labelSelector: matchExpressions: @@ -40,6 +40,15 @@ spec: values: - sheepdog topologyKey: "kubernetes.io/hostname" + nodeAffinity: + 
preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/jobs/envtest-job.yaml b/kube/services/jobs/envtest-job.yaml index 4f6b6d054..50923579c 100644 --- a/kube/services/jobs/envtest-job.yaml +++ b/kube/services/jobs/envtest-job.yaml @@ -19,7 +19,7 @@ spec: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - SPOT restartPolicy: Never automountServiceAccountToken: false containers: From 221523ced8e4b990b8728a81fa6ffd3f3296c0ab Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 13 Dec 2022 12:52:57 -0700 Subject: [PATCH 032/362] updated revproxy documentation (#2094) * updated revproxy documentation * need to open a new PR for release notes --- doc/kube-setup-revproxy.md | 3 ++- gen3/bin/kube-setup-revproxy.sh | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/kube-setup-revproxy.md b/doc/kube-setup-revproxy.md index 5c483e12f..fdf0b0db5 100644 --- a/doc/kube-setup-revproxy.md +++ b/doc/kube-setup-revproxy.md @@ -5,7 +5,8 @@ Configure and launch the reverse proxy. ## References * the reverse proxy [readme](../kube/services/revproxy/README.md) has more details. -* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md). (This is only deployed if the "deploy_elb" flag is set to true in the manifest-global configmap (set/added via the global section of the manifest.json).deploy the revproxy-ELB-service and WAF) +* WAF - the reverse proxy deploys the [modsecurity web application firewall](./waf.md). +* IMPORTANT: The modsecurity WAF and Revproxy ELB service is only deployed if the "deploy_elb" flag is set to true in the manifest-global configmap. 
The Revproxy ELB is now deprecated- we suggest deploying an AWS ALB instead (please see kube-setup-ingress script) * Please see https://github.com/uc-cdis/cloud-automation/blob/master/doc/kube-setup-ingress.md as AWS WAF and ALB is recommended. * [maintenance mode](./maintenance.md) * the [ip blacklist](../gen3/lib/manifestDefaults/revproxy/) may be configured with a custom `manifests/revproxy/blacklist.conf` diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 02fcc5c38..bba81166d 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -303,4 +303,4 @@ fi if [ "$deployELB" = true ]; then gen3_deploy_revproxy_elb -fi +fi From c9bb142b5624044c9b67980aded6dba24b2dcfb0 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 13 Dec 2022 16:08:08 -0700 Subject: [PATCH 033/362] - adding networkpolicies for argocd (#2100) - annotating the argocd namespace for network policy use - fixing the indentation in the argocd nginx conf file --- gen3/bin/kube-setup-argocd.sh | 1 + .../gen3/services/argocd_netpolicy.yaml | 33 +++++++++++++++++ .../gen3/services/revproxy_netpolicy.yaml | 4 +++ .../gen3.nginx.conf/argocd-server.conf | 35 +++++++++---------- 4 files changed, 55 insertions(+), 18 deletions(-) create mode 100644 kube/services/netpolicy/gen3/services/argocd_netpolicy.yaml diff --git a/gen3/bin/kube-setup-argocd.sh b/gen3/bin/kube-setup-argocd.sh index a2eb44e00..70d499b0a 100644 --- a/gen3/bin/kube-setup-argocd.sh +++ b/gen3/bin/kube-setup-argocd.sh @@ -11,6 +11,7 @@ then gen3_log_info "ArgoCD is already deployed. Skipping..." 
else kubectl create namespace argocd + kubectl annotate namespace argocd app="argocd" kubectl apply -f "${GEN3_HOME}/kube/services/argocd/install.yaml" -n argocd gen3 kube-setup-revproxy export argocdsecret=`kubectl get secret argocd-initial-admin-secret -n argocd -o json | jq .data.password -r | base64 -d` # pragma: allowlist secret diff --git a/kube/services/netpolicy/gen3/services/argocd_netpolicy.yaml b/kube/services/netpolicy/gen3/services/argocd_netpolicy.yaml new file mode 100644 index 000000000..ced3e4a20 --- /dev/null +++ b/kube/services/netpolicy/gen3/services/argocd_netpolicy.yaml @@ -0,0 +1,33 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: netpolicy-argocd +spec: + podSelector: + matchExpressions: + - key: app + operator: In + values: + - revproxy + - cohort-middleware + - wts + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - port: 80 + - port: 4000 + - port: 8080 + - port: 81 + - port: 82 + - port: 443 + egress: + - to: + - namespaceSelector: + matchLabels: + app: argocd + policyTypes: + - Ingress + - Egress diff --git a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml index 3cfe88d1f..2f1462385 100644 --- a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml @@ -42,6 +42,10 @@ spec: - namespaceSelector: matchLabels: app: argo + - to: + - namespaceSelector: + matchLabels: + app: argocd policyTypes: - Ingress - Egress diff --git a/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf b/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf index cceefd3eb..bdd98712e 100644 --- a/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf +++ b/kube/services/revproxy/gen3.nginx.conf/argocd-server.conf @@ -1,20 +1,19 @@ - location /argocd { - error_page 403 @errorworkspace; - set $authz_resource "/argocd"; - set $authz_method "access"; - set 
$authz_service "argocd"; - # be careful - sub-request runs in same context as this request - auth_request /gen3-authz; - - set $proxy_service "argocd"; - # $upstream is written to the logs - set $upstream http://argocd-server.argocd.svc.cluster.local; + location /argocd/ { + error_page 403 @errorworkspace; + set $authz_resource "/argocd"; + set $authz_method "access"; + set $authz_service "argocd"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; - rewrite ^/argocd/(.*) /$1 break; + set $proxy_service "argocd"; + set $upstream http://argocd-server.argocd.svc.cluster.local; - proxy_set_header Connection ''; - proxy_http_version 1.1; - chunked_transfer_encoding off; - - proxy_pass $upstream; - } \ No newline at end of file + rewrite ^/argocd/(.*) /$1 break; + + proxy_set_header Connection ''; + proxy_http_version 1.1; + chunked_transfer_encoding off; + + proxy_pass $upstream; + } \ No newline at end of file From 9ce170222b112bcd29a80f5c9847f3e94590915e Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 14 Dec 2022 09:53:17 -0600 Subject: [PATCH 034/362] feat(waf-enabled): Enable waf through a flag in the manifest (#1973) * feat(waf-enabled): Enable waf through a flag in the manifest * Added the ingress_setup_waf function. Tested and working in my dev commons to be implemented everywhere once the waf rules are tested. 
* removing the "AWSManagedRulesAnonymousIpList" rule Co-authored-by: Edward Malinowski Co-authored-by: EliseCastle23 Co-authored-by: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Co-authored-by: cmlsn <100160785+cmlsn@users.noreply.github.com> --- gen3/bin/kube-setup-ingress.sh | 86 ++++++++++++++++++++----- gen3/bin/waf-rules-GPE-312.json | 108 ++++++++++++++++++++++++++++++++ 2 files changed, 178 insertions(+), 16 deletions(-) create mode 100644 gen3/bin/waf-rules-GPE-312.json diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index bf718c29e..5dcd24394 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -1,28 +1,68 @@ #!/bin/bash -# - - source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" gen3_load "gen3/lib/kube-setup-init" +gen3_load "gen3/lib/g3k_manifest" +# Deploy WAF if flag set in manifest +manifestPath=$(g3k_manifest_path) +deployWaf="$(jq -r ".[\"global\"][\"waf_enabled\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" ctx="$(g3kubectl config current-context)" ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" scriptDir="${GEN3_HOME}/kube/services/ingress" +gen3_ingress_setup_waf() { + gen3_log_info "Starting GPE-312 waf setup" + #variable to see if WAF already exists + export waf=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains("devplanetv1")).Name'` +if [[ -z $waf ]]; then + gen3_log_info "Creating Web ACL. This may take a few minutes." 
+ aws wafv2 create-web-acl\ + --name $vpc_name-waf \ + --scope REGIONAL \ + --default-action Allow={} \ + --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=GPE-312WebAclMetrics \ + --rules file://waf-rules-GPE-312.json \ + --region us-east-1 + #Need to sleep to avoid "WAFUnavailableEntityException" error since the waf takes a bit to spin up + sleep 240 +else + gen3_log_info "WAF already exists. Skipping..." +fi + gen3_log_info "Attaching ACL to ALB." + export acl_arn=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains("devplanetv1")).ARN'` + export alb_name=`kubectl get ingress gen3-ingress | awk '{print $4}' | tail +2 | sed 's/^\([A-Za-z0-9]*-[A-Za-z0-9]*-[A-Za-z0-9]*\).*/\1/;q'` + export alb_arn=`aws elbv2 describe-load-balancers --name $alb_name | yq -r .LoadBalancers[0].LoadBalancerArn` + export association=`aws wafv2 list-resources-for-web-acl --web-acl-arn $acl_arn | grep $alb_arn| sed -e 's/^[ \t]*//' | sed -e 's/^"//' -e 's/"$//'` + #variable to see if the association already exists + echo "acl_arn: $acl_arn" + echo "alb_arn: $alb_arn" +if [[ $association != $alb_arn ]]; then + aws wafv2 associate-web-acl\ + --web-acl-arn $acl_arn \ + --resource-arn $alb_arn \ + --region us-east-1 + + gen3_log_info "Add ACL arn annotation to ALB ingress" + kubectl annotate ingress gen3-ingress "alb.ingress.kubernetes.io/wafv2-acl-arn=$acl_arn" +else + gen3_log_info "ALB is already associated with ACL. Skipping..." 
+fi +} + + +gen3_ingress_setup_role() { # https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.4/deploy/installation/ # https://raw.githubusercontent.com/kubernetes-sigs/aws-load-balancer-controller/v2.4.1/docs/install/iam_policy.json # only do this if we are running in the default namespace -if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then - saName="aws-load-balancer-controller" - roleName=$(gen3 api safe-name ingress) - policyName=$(gen3 api safe-name ingress-policy) - ingressPolicy="$(mktemp "$XDG_RUNTIME_DIR/ingressPolicy.json_XXXXXX")" - arPolicyFile="$(mktemp "$XDG_RUNTIME_DIR/arPolicy.json_XXXXXX")" - + local saName="aws-load-balancer-controller" + local roleName=$(gen3 api safe-name ingress) + local policyName=$(gen3 api safe-name ingress-policy) + local ingressPolicy="$(mktemp "$XDG_RUNTIME_DIR/ingressPolicy.json_XXXXXX")" + local arPolicyFile="$(mktemp "$XDG_RUNTIME_DIR/arPolicy.json_XXXXXX")" # Create an inline policy for the ingress-controller cat - > "$ingressPolicy" < /dev/null; then # setup role + if ! gen3 awsrole info "$roleName" "kube-system" > /dev/null; then # setup role gen3_log_info "creating IAM role for ingress: $roleName, linking to sa $saName" gen3 awsrole create "$roleName" "$saName" "kube-system" || return 1 aws iam put-role-policy --role-name "$roleName" --policy-document file://${ingressPolicy} --policy-name "$policyName" 1>&2 @@ -255,18 +295,31 @@ EOM # update the annotation - just to be thorough gen3 awsrole sa-annotate "$saName" "$roleName" kube-system fi - +} + +gen3_ingress_deploy_helm_chart() { kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" - if (! 
helm status aws-load-balancer-controller -n kube-system > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then helm repo add eks https://aws.github.io/eks-charts 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - + # # TODO: Move to values.yaml file helm upgrade --install aws-load-balancer-controller eks/aws-load-balancer-controller -n kube-system --set clusterName=$(gen3 api environment) --set serviceAccount.create=false --set serviceAccount.name=aws-load-balancer-controller 2> >(grep -v 'This is insecure' >&2) else gen3_log_info "kube-setup-ingress exiting - ingress already deployed, use --force to redeploy" fi +} + +if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then + # Create role/SA for the alb's + gen3_ingress_setup_role + # Deploy the aws-load-balancer-controller helm chart and upgrade if --force flag applied + gen3_ingress_deploy_helm_chart $1 +else + if [[ -z $(kubectl get sa -n kube-system | grep aws-load-balancer-controller) ]]; then + gen3_log_err "Please run this in the default namespace first to setup the necessary roles" + exit 1 + fi fi @@ -274,5 +327,6 @@ gen3_log_info "Applying ingress resource" export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}') g3kubectl apply -f "${GEN3_HOME}/kube/services/revproxy/revproxy-service.yaml" envsubst <$scriptDir/ingress.yaml | g3kubectl apply -f - - - +if [ "$deployWaf" = true ]; then + gen3_ingress_setup_waf +fi \ No newline at end of file diff --git a/gen3/bin/waf-rules-GPE-312.json b/gen3/bin/waf-rules-GPE-312.json new file mode 100644 index 000000000..082a61f43 --- /dev/null +++ b/gen3/bin/waf-rules-GPE-312.json @@ -0,0 +1,108 @@ +[ + { + "Name": "AWS-AWSManagedRulesBotControlRuleSet", + "Priority": 0, + "Statement": { + "ManagedRuleGroupStatement": { + "VendorName": "AWS", + "Name": "AWSManagedRulesBotControlRuleSet" + } + }, + "OverrideAction": { + "None": {} + }, + "VisibilityConfig": { + "SampledRequestsEnabled": 
true, + "CloudWatchMetricsEnabled": true, + "MetricName": "AWS-AWSManagedRulesBotControlRuleSet" + } + }, + { + "Name": "AWS-AWSManagedRulesAdminProtectionRuleSet", + "Priority": 1, + "Statement": { + "ManagedRuleGroupStatement": { + "VendorName": "AWS", + "Name": "AWSManagedRulesAdminProtectionRuleSet", + "ExcludedRules": [ + { + "Name": "AdminProtection_URIPATH" + } + ] + } + }, + "OverrideAction": { + "None": {} + }, + "VisibilityConfig": { + "SampledRequestsEnabled": true, + "CloudWatchMetricsEnabled": true, + "MetricName": "AWS-AWSManagedRulesAdminProtectionRuleSet" + } + }, + { + "Name": "AWS-AWSManagedRulesAmazonIpReputationList", + "Priority": 2, + "Statement": { + "ManagedRuleGroupStatement": { + "VendorName": "AWS", + "Name": "AWSManagedRulesAmazonIpReputationList" + } + }, + "OverrideAction": { + "None": {} + }, + "VisibilityConfig": { + "SampledRequestsEnabled": true, + "CloudWatchMetricsEnabled": true, + "MetricName": "AWS-AWSManagedRulesAmazonIpReputationList" + } + }, + { + "Name": "AWS-AWSManagedRulesCommonRuleSet", + "Priority": 4, + "Statement": { + "ManagedRuleGroupStatement": { + "VendorName": "AWS", + "Name": "AWSManagedRulesCommonRuleSet", + "ExcludedRules": [ + { + "Name": "EC2MetaDataSSRF_BODY" + }, + { + "Name": "GenericLFI_BODY" + }, + { + "Name": "SizeRestrictions_QUERYSTRING" + } + ] + } + }, + "OverrideAction": { + "None": {} + }, + "VisibilityConfig": { + "SampledRequestsEnabled": true, + "CloudWatchMetricsEnabled": true, + "MetricName": "AWS-AWSManagedRulesCommonRuleSet" + } + }, + { + "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet", + "Priority": 5, + "Statement": { + "ManagedRuleGroupStatement": { + "VendorName": "AWS", + "Name": "AWSManagedRulesKnownBadInputsRuleSet" + } + }, + "OverrideAction": { + "None": {} + }, + "VisibilityConfig": { + "SampledRequestsEnabled": true, + "CloudWatchMetricsEnabled": true, + "MetricName": "AWS-AWSManagedRulesKnownBadInputsRuleSet" + } + } +] \ No newline at end of file From 
4051ca150af025ced86c78c3a4e4810a7750f8b5 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 14 Dec 2022 08:53:58 -0700 Subject: [PATCH 035/362] the gitops-sa account was not able to access the argo namespace because the default namespace was not provided for the gitops-sa (#2104) --- kube/services/jenkins/rolebinding-devops.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kube/services/jenkins/rolebinding-devops.yaml b/kube/services/jenkins/rolebinding-devops.yaml index 2f262172e..53ad7d1ed 100644 --- a/kube/services/jenkins/rolebinding-devops.yaml +++ b/kube/services/jenkins/rolebinding-devops.yaml @@ -15,11 +15,13 @@ roleRef: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: argo-binding + name: argo-role-binding namespace: argo subjects: - kind: ServiceAccount name: gitops-sa + namespace: default + apiGroup: "" roleRef: kind: ClusterRole name: admin From cdccffcb4d5c8065b9afcf0f841239bcc882a20b Mon Sep 17 00:00:00 2001 From: Alexander VanTol Date: Wed, 14 Dec 2022 11:58:31 -0600 Subject: [PATCH 036/362] chore(revproxy): update nginx config to increase max request body size (manifestservice has no limit and we're running into issues with it being at 1m default) (#2052) Co-authored-by: Alexander VT --- kube/services/revproxy/nginx.conf | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf index 79c5d2e22..2e3a3b151 100644 --- a/kube/services/revproxy/nginx.conf +++ b/kube/services/revproxy/nginx.conf @@ -44,6 +44,9 @@ types_hash_max_size 2048; port_in_redirect off; # server_tokens off; +# increase max from default 1m +client_max_body_size 200m; + # For websockets map $http_upgrade $connection_upgrade { default upgrade; @@ -156,7 +159,7 @@ perl_set $namespace 'sub { return $ENV{"POD_NAMESPACE"}; }'; ## # For using fence, indexd, etc from a different namespace 
within the same k8 cluster - # support data ecosystem feature ... -## +## perl_set $des_domain 'sub { return $ENV{"DES_NAMESPACE"} ? qq{.$ENV{"DES_NAMESPACE"}.svc.cluster.local} : qq{.$ENV{"POD_NAMESPACE"}.svc.cluster.local}; }'; ## @@ -227,7 +230,7 @@ server { # check request against ip black list include /etc/nginx/manifest-revproxy/blacklist.conf; - + # # From https://enable-cors.org/server_nginx.html # This overrides the individual services @@ -245,7 +248,7 @@ server { # update service release cookie add_header Set-Cookie "service_releases=${service_releases};Path=/;Max-Age=600;HttpOnly;Secure;SameSite=Lax"; - + if ($request_method = 'OPTIONS') { return 204; } @@ -297,8 +300,8 @@ server { } # - # initialize proxy_service and upstream used as key in logs to - # unspecified values - + # initialize proxy_service and upstream used as key in logs to + # unspecified values - # individual service locations should override to "peregrine", ... # set $proxy_service "noproxy"; @@ -328,7 +331,7 @@ server { proxy_busy_buffers_size 32k; client_body_buffer_size 16k; proxy_read_timeout 300; - + # # also incoming from client: # * https://fullvalence.com/2016/07/05/cookie-size-in-nginx/ @@ -390,7 +393,7 @@ server { return 500 "{ \"error\": \"service failure - try again later\"}"; } - location = /_status { + location = /_status { default_type application/json; set $upstream http://localhost; return 200 "{ \"message\": \"Feelin good!\", \"csrf\": \"$csrf_token\" }\n"; From 45e9d1fa1896571811968a294adace9e80453cc3 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 14 Dec 2022 11:59:04 -0600 Subject: [PATCH 037/362] chore(fail-fast): Update gen3 reset to exit on error (#2088) * chore(fail-fast): Update gen3 reset to exit on error * chore(fail-fast): Update gen3 reset to exit on error Co-authored-by: Edward Malinowski Co-authored-by: J. 
Q <55899496+jawadqur@users.noreply.github.com> --- gen3/bin/reset.sh | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh index 6dac0ea16..0ea135df1 100644 --- a/gen3/bin/reset.sh +++ b/gen3/bin/reset.sh @@ -1,9 +1,5 @@ #!/bin/bash -# TODO: Experiencing the following error: -# [31mERROR: 21:00:30 - Lock already exists and timed out waiting for lock to unlock[39m -# + exit 1 -# Needs further investigation. Commenting out the next line for now -# set -e +set -e # # script to reset kubernetes namespace gen3 objects/services From e4946ba66b41e4e4e2accae2b0e4282c999758a3 Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Wed, 14 Dec 2022 12:00:06 -0600 Subject: [PATCH 038/362] fix: upgrade lit-html from 1.3.0 to 1.4.1 (#2106) Snyk has created this PR to upgrade lit-html from 1.3.0 to 1.4.1. See this package in npm: https://www.npmjs.com/package/lit-html See this project in Snyk: https://app.snyk.io/org/plan-x/project/2334b58a-d787-40b8-8f93-2aa7a812c519?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot --- files/dashboard/usage-reports/package-lock.json | 6 +++--- files/dashboard/usage-reports/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/files/dashboard/usage-reports/package-lock.json b/files/dashboard/usage-reports/package-lock.json index 4841621b6..4a15f166d 100644 --- a/files/dashboard/usage-reports/package-lock.json +++ b/files/dashboard/usage-reports/package-lock.json @@ -10,9 +10,9 @@ "integrity": "sha512-8uQYa7zJN8hq9z+g8z1bqCfdC8eoDAeVnM5sfqs7KHv9/ifoJ500m018fpFc7RDaO6SWCLCXwo/wPSNcdYTgcw==" }, "lit-html": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-1.3.0.tgz", - "integrity": "sha512-0Q1bwmaFH9O14vycPHw8C/IeHMk/uSDldVLIefu/kfbTBGIc44KGH6A8p1bDfxUfHdc8q6Ct7kQklWoHgr4t1Q==" + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/lit-html/-/lit-html-1.4.1.tgz", + "integrity": 
"sha512-B9btcSgPYb1q4oSOb/PrOT6Z/H+r6xuNzfH4lFli/AWhYwdtrgQkQWBbIc6mdnf6E2IL3gDXdkkqNktpU0OZQA==" } } } diff --git a/files/dashboard/usage-reports/package.json b/files/dashboard/usage-reports/package.json index 6552248ae..86eca2d73 100644 --- a/files/dashboard/usage-reports/package.json +++ b/files/dashboard/usage-reports/package.json @@ -11,6 +11,6 @@ "license": "ISC", "dependencies": { "jasmine-core": "^3.6.0", - "lit-html": "^1.3.0" + "lit-html": "^1.4.1" } } From 53e8251f708a8a344240b90cafef67db6d26e66d Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Wed, 14 Dec 2022 12:03:14 -0600 Subject: [PATCH 039/362] fix: upgrade jasmine-core from 3.6.0 to 3.99.1 (#2105) Snyk has created this PR to upgrade jasmine-core from 3.6.0 to 3.99.1. See this package in npm: https://www.npmjs.com/package/jasmine-core See this project in Snyk: https://app.snyk.io/org/plan-x/project/2334b58a-d787-40b8-8f93-2aa7a812c519?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot Co-authored-by: J. 
Q <55899496+jawadqur@users.noreply.github.com> --- files/dashboard/usage-reports/package-lock.json | 6 +++--- files/dashboard/usage-reports/package.json | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/files/dashboard/usage-reports/package-lock.json b/files/dashboard/usage-reports/package-lock.json index 4a15f166d..24e3de518 100644 --- a/files/dashboard/usage-reports/package-lock.json +++ b/files/dashboard/usage-reports/package-lock.json @@ -5,9 +5,9 @@ "requires": true, "dependencies": { "jasmine-core": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.6.0.tgz", - "integrity": "sha512-8uQYa7zJN8hq9z+g8z1bqCfdC8eoDAeVnM5sfqs7KHv9/ifoJ500m018fpFc7RDaO6SWCLCXwo/wPSNcdYTgcw==" + "version": "3.99.1", + "resolved": "https://registry.npmjs.org/jasmine-core/-/jasmine-core-3.99.1.tgz", + "integrity": "sha512-Hu1dmuoGcZ7AfyynN3LsfruwMbxMALMka+YtZeGoLuDEySVmVAPaonkNoBRIw/ectu8b9tVQCJNgp4a4knp+tg==" }, "lit-html": { "version": "1.4.1", diff --git a/files/dashboard/usage-reports/package.json b/files/dashboard/usage-reports/package.json index 86eca2d73..7f66661ce 100644 --- a/files/dashboard/usage-reports/package.json +++ b/files/dashboard/usage-reports/package.json @@ -10,7 +10,7 @@ "author": "", "license": "ISC", "dependencies": { - "jasmine-core": "^3.6.0", + "jasmine-core": "^3.99.1", "lit-html": "^1.4.1" } } From 4b1f6d2a13b0fefa644fde5895a5686737e674e5 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 14 Dec 2022 14:55:47 -0600 Subject: [PATCH 040/362] Revert "chore(fail-fast): Update gen3 reset to exit on error (#2088)" (#2108) This reverts commit 45e9d1fa1896571811968a294adace9e80453cc3. 
--- gen3/bin/reset.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh index 0ea135df1..6dac0ea16 100644 --- a/gen3/bin/reset.sh +++ b/gen3/bin/reset.sh @@ -1,5 +1,9 @@ #!/bin/bash -set -e +# TODO: Experiencing the following error: +# [31mERROR: 21:00:30 - Lock already exists and timed out waiting for lock to unlock[39m +# + exit 1 +# Needs further investigation. Commenting out the next line for now +# set -e # # script to reset kubernetes namespace gen3 objects/services From 58ad1ec730801baa63c43c905d24c36eecaef8b4 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 15 Dec 2022 09:18:06 -0700 Subject: [PATCH 041/362] changing from an annotation to a label (#2109) --- gen3/bin/kube-setup-argocd.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-argocd.sh b/gen3/bin/kube-setup-argocd.sh index 70d499b0a..635b7c584 100644 --- a/gen3/bin/kube-setup-argocd.sh +++ b/gen3/bin/kube-setup-argocd.sh @@ -11,7 +11,7 @@ then gen3_log_info "ArgoCD is already deployed. Skipping..." 
else kubectl create namespace argocd - kubectl annotate namespace argocd app="argocd" + kubectl label namespace argocd app="argocd" kubectl apply -f "${GEN3_HOME}/kube/services/argocd/install.yaml" -n argocd gen3 kube-setup-revproxy export argocdsecret=`kubectl get secret argocd-initial-admin-secret -n argocd -o json | jq .data.password -r | base64 -d` # pragma: allowlist secret From 6a37dc17a2c9245275271b509c656364fc158c37 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Fri, 16 Dec 2022 21:01:48 -0600 Subject: [PATCH 042/362] Update filter metadata compoment in HEAL CEDAR ingestion script (#2093) * update cedar ingestion script for filter component * update comment * avoid retry * fix * update * - adding networkpolicies for argocd (#2100) - annotating the argocd namespace for network policy use - fixing the indentation in the argocd nginx conf file * feat(waf-enabled): Enable waf through a flag in the manifest (#1973) * feat(waf-enabled): Enable waf through a flag in the manifest * Added the ingress_setup_waf function. Tested and working in my dev commons to be implemented everywhere once the waf rules are tested. 
* removing the "AWSManagedRulesAnonymousIpList" rule Co-authored-by: Edward Malinowski Co-authored-by: EliseCastle23 Co-authored-by: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Co-authored-by: cmlsn <100160785+cmlsn@users.noreply.github.com> * the gitops-sa account was not able to access the argo namespace because the default namespace was not provided for the gitops-sa (#2104) * chore(revproxy): update nginx config to increase max request body size (manifestservice has no limit and we're running into issues with it being at 1m default) (#2052) Co-authored-by: Alexander VT * chore(fail-fast): Update gen3 reset to exit on error (#2088) * chore(fail-fast): Update gen3 reset to exit on error * chore(fail-fast): Update gen3 reset to exit on error Co-authored-by: Edward Malinowski Co-authored-by: J. Q <55899496+jawadqur@users.noreply.github.com> * fix: upgrade lit-html from 1.3.0 to 1.4.1 (#2106) Snyk has created this PR to upgrade lit-html from 1.3.0 to 1.4.1. See this package in npm: https://www.npmjs.com/package/lit-html See this project in Snyk: https://app.snyk.io/org/plan-x/project/2334b58a-d787-40b8-8f93-2aa7a812c519?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot * fix: upgrade jasmine-core from 3.6.0 to 3.99.1 (#2105) Snyk has created this PR to upgrade jasmine-core from 3.6.0 to 3.99.1. See this package in npm: https://www.npmjs.com/package/jasmine-core See this project in Snyk: https://app.snyk.io/org/plan-x/project/2334b58a-d787-40b8-8f93-2aa7a812c519?utm_source=github&utm_medium=referral&page=upgrade-pr Co-authored-by: snyk-bot Co-authored-by: J. Q <55899496+jawadqur@users.noreply.github.com> * Revert "chore(fail-fast): Update gen3 reset to exit on error (#2088)" (#2108) This reverts commit 45e9d1fa1896571811968a294adace9e80453cc3. 
* changing from an annotation to a label (#2109) * exit on failure but no retry Co-authored-by: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Co-authored-by: emalinowski Co-authored-by: Edward Malinowski Co-authored-by: EliseCastle23 Co-authored-by: cmlsn <100160785+cmlsn@users.noreply.github.com> Co-authored-by: Alexander VanTol Co-authored-by: Alexander VT Co-authored-by: J. Q <55899496+jawadqur@users.noreply.github.com> Co-authored-by: Michael Lukowski Co-authored-by: snyk-bot --- .gitignore | 1 + .../healdata/heal-cedar-data-ingest.py | 62 +++++++++++++++++-- kube/services/jobs/cedar-ingestion-job.yaml | 2 + 3 files changed, 59 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index dbce5bd82..299bdc807 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ terraform *~ *.swp .DS_Store +.dccache kube/services/fluentd/varlogs/ kube/services/fluentd/dockerlogs/ diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index fb2c1f2c1..586b43249 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,9 +1,57 @@ import argparse -import json +import sys import requests import pydash -import os +# Defines how a field in metadata is going to be mapped into a key in filters +FILTER_FIELD_MAPPINGS = { + "Study Type.study_stage": "Study Type", + "Data.data_type": "Data Type", + "Study Type.study_subject_type": "Subject Type", + "Human Subject Applicability.gender_applicability": "Gender", + "Human Subject Applicability.age_applicability": "Age" +} + +# Defines how to handle special cases for values in filters +SPECIAL_VALUE_MAPPINGS = { + "Interview/Focus Group - structured": "Interview/Focus Group", + "Interview/Focus Group - semi-structured": "Interview/Focus Group", + "Interview/Focus Group - unstructured": "Interview/Focus Group", + "Questionnaire/Survey/Assessment - validated instrument": 
"Questionnaire/Survey/Assessment", + "Questionnaire/Survey/Assessment - unvalidated instrument": "Questionnaire/Survey/Assessment", + "Cis Male": "Male", + "Cis Female": "Female", + "Trans Male": "Female-to-male transsexual", + "Trans Female": "Male-to-female transsexual", + "Agender, Non-binary, gender non-conforming": "Other", + "Gender Queer": "Other", + "Intersex": "Intersexed" +} + +# Defines field that we don't want to include in the filters +OMITTED_VALUES_MAPPING = { + "Human Subject Applicability.gender_applicability": "Not applicable" +} + +def update_filter_metadata(metadata_to_update): + filter_metadata = [] + for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items(): + filter_field_values = pydash.get(metadata_to_update, metadata_field_key) + if filter_field_values: + if isinstance(filter_field_values, str): + filter_field_values = [filter_field_values] + if not isinstance(filter_field_values, list): + print(filter_field_values) + raise TypeError("Neither a string nor a list") + for filter_field_value in filter_field_values: + if (metadata_field_key, filter_field_value) in OMITTED_VALUES_MAPPING.items(): + continue + if filter_field_value in SPECIAL_VALUE_MAPPINGS: + filter_field_value = SPECIAL_VALUE_MAPPINGS[filter_field_value] + filter_metadata.append({"key": filter_field_key, "value": filter_field_value}) + filter_metadata = pydash.uniq(filter_metadata) + metadata_to_update["advSearchFilters"] = filter_metadata + return metadata_to_update parser = argparse.ArgumentParser() @@ -16,13 +64,13 @@ if not args.directory: print("Directory ID is required!") - exit(1) + sys.exit(1) if not args.access_token: print("User access token is required!") - exit(1) + sys.exit(1) if not args.hostname: print("Hostname is required!") - exit(1) + sys.exit(1) dir_id = args.directory access_token = args.access_token @@ -39,7 +87,7 @@ metadata_return = cedar.json() if "metadata" not in metadata_return: print("Got 200 from CEDAR wrapper but no metadata in 
body, something is not right!") - exit(1) + sys.exit(1) print(f"Successfully got {len(metadata_return['metadata'])} record(s) from CEDAR directory") for cedar_record in metadata_return["metadata"]: @@ -60,7 +108,9 @@ print("Metadata is already registered. Updating MDS record") elif mds_res["_guid_type"] == "unregistered_discovery_metadata": print("Metadata is has not been registered. Registering it in MDS record") + continue pydash.merge(mds_discovery_data_body, mds_res["gen3_discovery"], cedar_record) + mds_discovery_data_body = update_filter_metadata(mds_discovery_data_body) mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml index 87b284bf0..6fb2e1e44 100644 --- a/kube/services/jobs/cedar-ingestion-job.yaml +++ b/kube/services/jobs/cedar-ingestion-job.yaml @@ -19,6 +19,7 @@ kind: Job metadata: name: cedar-ingestion spec: + backoffLimit: 0 template: metadata: labels: @@ -106,6 +107,7 @@ spec: export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)" python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --access_token $ACCESS_TOKEN --directory $CEDAR_DIRECTORY_ID --hostname $HOSTNAME echo "All done - exit status $?" 
+ restartPolicy: Never - name: fence GEN3_FENCE_IMAGE imagePullPolicy: Always From af76dabcfab201491c8385b44551b9372c8c615f Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Mon, 19 Dec 2022 09:23:39 -0600 Subject: [PATCH 043/362] update datadog chart version to 3.6.4 (#2115) update datadog chart version to 3.6.4 --- gen3/bin/kube-setup-datadog.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh index 76019dff9..baf0dbb2a 100644 --- a/gen3/bin/kube-setup-datadog.sh +++ b/gen3/bin/kube-setup-datadog.sh @@ -44,7 +44,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.1.9 2> >(grep -v 'This is insecure' >&2) + helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2) ) else gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" From 62f2fb0163a55eaf2fa8317696a7fdeb48517e89 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Tue, 20 Dec 2022 15:09:29 -0800 Subject: [PATCH 044/362] increase wait time for K8sReset (#2117) Even though K8sReset stage waits for 90 minutes, the limit of 30 minutes here causes PRs to fail if the pods do not roll within 30 minutes. --- gen3/bin/kube-wait4-pods.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-wait4-pods.sh b/gen3/bin/kube-wait4-pods.sh index 2da695e4c..03068b50d 100644 --- a/gen3/bin/kube-wait4-pods.sh +++ b/gen3/bin/kube-wait4-pods.sh @@ -11,20 +11,20 @@ help() { in the 'waiting' state. 
Use to wait till all launched services are up and healthy before performing some action. - Waits for up to 15 minutes. Non-zero exit code - if 15 minutes expires, and pods are still not ready. + Waits for up to 60 minutes. Non-zero exit code + if 60 minutes expires, and pods are still not ready. EOM return 0 } -MAX_RETRIES=${1:-180} +MAX_RETRIES=${1:-360} IS_K8S_RESET="${2:-false}" if [[ ! "$MAX_RETRIES" =~ ^[0-9]+$ ]]; then gen3_log_err "ignoring invalid retry count: $1" - MAX_RETRIES=180 + MAX_RETRIES=360 fi if [[ ! "$IS_K8S_RESET" =~ ^(true$|false$) ]]; From 6bedb484058779e275f0894570fbe4770c30d4a3 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 21 Dec 2022 12:04:48 -0600 Subject: [PATCH 045/362] chore(daily-reports): Updated reports to run daily (#2086) * chore(daily-reports): Updated reports to run daily * chore(daily-reports): Updated reports to run daily * chore(daily-reports): Updated reports to run daily * chore(daily-reports): Updated reports to run daily Co-authored-by: Edward Malinowski --- .../jobs/opencost-report-argo-job.yaml | 29 +++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml index b50c83f78..59b4bdab8 100644 --- a/kube/services/jobs/opencost-report-argo-job.yaml +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -5,16 +5,19 @@ # OPENCOST_URL $OPENCOST_URL \ # # BUCKET_NAME(required) -# Name of the bucket to upload the generated reports to. +# Name of the bucket to upload the generated reports to. # Make sure that there is a service account called "reports-service-account" with access to this bucket. # # OPENCOST_URL(optional) # URL to query OpenCost API's. Default is https://kubecost-cost-analyzer.kubecost -# +# +# CHANNEL(optional) +# The slack channel ID that the alert will get sent to. 
Easiest way to find is to open slack in a browser, navigate to +# the webpage and copy down the ID at the end of the URL that begins with a C. # # Example # gen3 job run opencost-report-argo BUCKET_NAME opencost-report-bucket -# +# # Cronjob Example # gen3 job cron opencost-report-argo @daily BUCKET_NAME opencost-report-bucket apiVersion: batch/v1 @@ -52,14 +55,30 @@ spec: key: environment - name: BUCKET_NAME GEN3_BUCKET_NAME|-value: ""-| + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + optional: true + - name: channel + GEN3_CHANNEL|-value: ""-| command: [ "/bin/bash" ] args: - "-c" - | - proto-opencost-reporter GetAllocationReport \ - --from_days_before 9 \ + proto-opencost-reporter GetAllocationReport \ + --from_days_before 2 \ --to_days_before 1 \ --aggregate_by label:gen3username label:workflows.argoproj.io/workflow \ --filter_namespaces argo \ --share_idle_by_node + rc=$? + if [[ "${slackWebHook}" != 'None' ]]; then + if [ $rc != 0 ]; then + curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-FAILED: Opencost report job failed to create a report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}}" "${slackWebHook}"; + else + curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-SUCCEEDED: Opencost report job created report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}}" "${slackWebHook}" + fi + fi restartPolicy: Never From 04dbac622968c4231a2acac749cf91e57b699a61 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Wed, 21 Dec 2022 14:23:36 -0600 Subject: [PATCH 046/362] Update cedar-ingestion-job.yaml (#2118) --- kube/services/jobs/cedar-ingestion-job.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml index 6fb2e1e44..a43b2937d 100644 --- a/kube/services/jobs/cedar-ingestion-job.yaml +++ 
b/kube/services/jobs/cedar-ingestion-job.yaml @@ -107,7 +107,6 @@ spec: export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)" python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --access_token $ACCESS_TOKEN --directory $CEDAR_DIRECTORY_ID --hostname $HOSTNAME echo "All done - exit status $?" - restartPolicy: Never - name: fence GEN3_FENCE_IMAGE imagePullPolicy: Always From 23c0bc8023e4921e509d2151aa3e14f0f95f5486 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Wed, 21 Dec 2022 20:50:40 -0600 Subject: [PATCH 047/362] feat(ohdsi): update requests/limits for Atlas/WebAPI (descreasing requests and increasing limits due to OOM errors) (#2119) --- .../services/ohdsi-atlas/ohdsi-atlas-deploy.yaml | 8 ++++---- .../ohdsi-webapi/ohdsi-webapi-deploy.yaml | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml index aaf552389..b96b100e2 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml @@ -70,8 +70,8 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 1 - memory: 1Gi + cpu: 100m + memory: 100Mi limits: - cpu: 1 - memory: 1Gi + cpu: 500m + memory: 500Mi diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 6d82cc691..3caf4bb9e 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -71,11 +71,11 @@ spec: imagePullPolicy: Always resources: requests: - cpu: '0.5' - memory: 1Gi + cpu: 250m + memory: 500Mi limits: - cpu: '0.5' - memory: 1Gi + cpu: 500m + memory: 4Gi - name: ohdsi-webapi-reverse-proxy image: nginx:1.23 ports: @@ -88,8 +88,8 @@ spec: imagePullPolicy: Always resources: requests: - cpu: '0.5' - memory: 1Gi + cpu: 100m + memory: 100Mi limits: - cpu: '0.5' - memory: 1Gi + cpu: 500m + memory: 500Mi 
From de63e477ac7f3431f28ccb53b6524ea8bc265287 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 22 Dec 2022 13:01:06 -0700 Subject: [PATCH 048/362] Update web_whitelist (#2121) --- files/squid_whitelist/web_whitelist | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index c1238bea6..3f009534f 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -82,6 +82,7 @@ k8s.gcr.io ks.osdc.io kubecost.github.io kubernetes.github.io +kubernetes-sigs.github.io lib.stat.cmu.edu login.mathworks.com login.microsoftonline.com @@ -119,6 +120,7 @@ opportunityinsights.org orcid.org pgp.mit.edu ppa.launchpad.net +prometheus-community.github.io public.ecr.aws pubmirrors.dal.corespace.com reflector.westga.edu From 379bb20a5fff25a94dee60b9826cdb9b6572a55d Mon Sep 17 00:00:00 2001 From: Kyle Hernandez Date: Thu, 29 Dec 2022 09:55:37 -0500 Subject: [PATCH 049/362] fix(opencost): remove extra curly bracket (#2122) --- kube/services/jobs/opencost-report-argo-job.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml index 59b4bdab8..4b28e9e1c 100644 --- a/kube/services/jobs/opencost-report-argo-job.yaml +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -76,9 +76,9 @@ spec: rc=$? 
if [[ "${slackWebHook}" != 'None' ]]; then if [ $rc != 0 ]; then - curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-FAILED: Opencost report job failed to create a report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}}" "${slackWebHook}"; + curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-FAILED: Opencost report job failed to create a report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}" "${slackWebHook}"; else - curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-SUCCEEDED: Opencost report job created report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}}" "${slackWebHook}" + curl -X POST --data-urlencode "payload={\"text\": \"OPENCOST-REPORT-JOB-SUCCEEDED: Opencost report job created report\", \"channel\": \"${channel}\", \"username\": \"opencost-report-job\"}" "${slackWebHook}" fi fi restartPolicy: Never From 392987e2104d7f9e535d7e56f51e62c46e8779b7 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 3 Jan 2023 08:49:19 -0700 Subject: [PATCH 050/362] needed to update the file path for the waf-rules. (#2114) * needed to update the file path for the waf-rules. 
* adjusting the rule priority since one of the rules was ultimately removed * adjusting the quotations for a variable * Finally fixed quotation issue * increasing the sleep time due to the "WAFUnavailableEntityException" error --- gen3/bin/kube-setup-ingress.sh | 8 ++++---- gen3/bin/waf-rules-GPE-312.json | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index 5dcd24394..8def0451d 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -17,7 +17,7 @@ scriptDir="${GEN3_HOME}/kube/services/ingress" gen3_ingress_setup_waf() { gen3_log_info "Starting GPE-312 waf setup" #variable to see if WAF already exists - export waf=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains("devplanetv1")).Name'` + export waf=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).Name'` if [[ -z $waf ]]; then gen3_log_info "Creating Web ACL. This may take a few minutes." aws wafv2 create-web-acl\ @@ -25,15 +25,15 @@ if [[ -z $waf ]]; then --scope REGIONAL \ --default-action Allow={} \ --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=GPE-312WebAclMetrics \ - --rules file://waf-rules-GPE-312.json \ + --rules file://${GEN3_HOME}/gen3/bin/waf-rules-GPE-312.json \ --region us-east-1 #Need to sleep to avoid "WAFUnavailableEntityException" error since the waf takes a bit to spin up - sleep 240 + sleep 300 else gen3_log_info "WAF already exists. Skipping..." fi gen3_log_info "Attaching ACL to ALB." 
- export acl_arn=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains("devplanetv1")).ARN'` + export acl_arn=`aws wafv2 list-web-acls --scope REGIONAL | jq -r '.WebACLs[]|select(.Name| contains(env.vpc_name)).ARN'` export alb_name=`kubectl get ingress gen3-ingress | awk '{print $4}' | tail +2 | sed 's/^\([A-Za-z0-9]*-[A-Za-z0-9]*-[A-Za-z0-9]*\).*/\1/;q'` export alb_arn=`aws elbv2 describe-load-balancers --name $alb_name | yq -r .LoadBalancers[0].LoadBalancerArn` export association=`aws wafv2 list-resources-for-web-acl --web-acl-arn $acl_arn | grep $alb_arn| sed -e 's/^[ \t]*//' | sed -e 's/^"//' -e 's/"$//'` diff --git a/gen3/bin/waf-rules-GPE-312.json b/gen3/bin/waf-rules-GPE-312.json index 082a61f43..b37eab510 100644 --- a/gen3/bin/waf-rules-GPE-312.json +++ b/gen3/bin/waf-rules-GPE-312.json @@ -60,7 +60,7 @@ }, { "Name": "AWS-AWSManagedRulesCommonRuleSet", - "Priority": 4, + "Priority": 3, "Statement": { "ManagedRuleGroupStatement": { "VendorName": "AWS", @@ -89,7 +89,7 @@ }, { "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet", - "Priority": 5, + "Priority": 4, "Statement": { "ManagedRuleGroupStatement": { "VendorName": "AWS", From f610cb44bb09bb504b334a52d294bd0bf8afc269 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 3 Jan 2023 08:50:33 -0700 Subject: [PATCH 051/362] editing kube-setup-argo script to contain a lifecycle policy (#2113) * editing kube-setup-argo script to contain a lifecycle policy * moving the location of the policy creation for testing * moving the lifecycle creation to a better spot * modified the lifecycle policy --- .secrets.baseline | 4 ++-- gen3/bin/kube-setup-argo.sh | 22 ++++++++++++++++++++-- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index ee70d5d8c..4c82e4e42 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - 
"generated_at": "2022-12-13T12:32:32Z", + "generated_at": "2022-12-16T20:29:01Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -338,7 +338,7 @@ "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", "is_secret": false, "is_verified": false, - "line_number": 182, + "line_number": 200, "type": "Secret Keyword" } ], diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index e95f216fe..99728efaa 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -14,6 +14,7 @@ function setup_argo_buckets { local accountNumber local environment local policyFile="$XDG_RUNTIME_DIR/policy_$$.json" + local bucketLifecyclePolicyFile="$XDG_RUNTIME_DIR/bucket_lifecycle_policy_$$.json" if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then @@ -97,6 +98,21 @@ EOF } ] } +EOF + cat > "$bucketLifecyclePolicyFile" < /dev/null 2>&1; then aws iam create-user --user-name ${userName} @@ -139,7 +154,6 @@ EOF g3kubectl delete secret -n argo argo-s3-creds fi - gen3_log_info "Creating s3 creds secret in argo namespace" if [[ -z $internalBucketName ]]; then g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} @@ -150,9 +164,13 @@ EOF ## if new bucket then do the following # Get the aws keys from secret + # Create and attach lifecycle policy # Set bucket policies # Update secret to have new bucket + gen3_log_info "Creating bucket lifecycle policy" + aws s3api put-bucket-lifecycle --bucket ${bucketName} --lifecycle-configuration file://$bucketLifecyclePolicyFile + # Always update the policy, in case manifest buckets change aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile if [[ ! 
-z $internalBucketPolicyFile ]]; then From 63d492b7d2e6153bb0d69cc19c2fe3f8f097ba8e Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 3 Jan 2023 09:54:58 -0600 Subject: [PATCH 052/362] =?UTF-8?q?fix(prometheus-images):=20Updated=20val?= =?UTF-8?q?ues=20yaml=20image=20locations=20to=20match=20=E2=80=A6=20(#212?= =?UTF-8?q?3)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(prometheus-images): Updated values yaml image locations to match chart changes * fix(prometheus-images): Updated values yaml image locations to match chart changes * fix(prometheus-images): Updated thanos images to ones that support EKS IAM SA's Co-authored-by: Edward Malinowski --- kube/services/monitoring/thanos-deploy.yaml | 8 ++++---- kube/services/monitoring/values.yaml | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/kube/services/monitoring/thanos-deploy.yaml b/kube/services/monitoring/thanos-deploy.yaml index 74c98dc19..8ff7a54e4 100644 --- a/kube/services/monitoring/thanos-deploy.yaml +++ b/kube/services/monitoring/thanos-deploy.yaml @@ -19,7 +19,7 @@ spec: spec: containers: - name: thanos-query - image: quay.io/thanos/thanos:v0.23.0 + image: quay.io/thanos/thanos:v0.25.2 args: - 'query' - '--log.level=debug' @@ -100,7 +100,7 @@ spec: spec: containers: - name: thanos-store - image: quay.io/thanos/thanos:v0.23.0 + image: quay.io/thanos/thanos:v0.25.2 args: - 'store' - '--log.level=debug' @@ -164,7 +164,7 @@ spec: spec: containers: - name: thanos-compactor - image: quay.io/thanos/thanos:v0.23.0 + image: quay.io/thanos/thanos:v0.25.2 args: - 'compact' - '--log.level=debug' @@ -217,4 +217,4 @@ spec: interval: 30s selector: matchLabels: - app: thanos-compactor \ No newline at end of file + app: thanos-compactor diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml index 25208c9b6..761764c89 100644 --- a/kube/services/monitoring/values.yaml +++ 
b/kube/services/monitoring/values.yaml @@ -452,7 +452,7 @@ alertmanager: ## Image of Alertmanager ## image: - repository: quay.io/prometheus/alertmanager + repository: prometheus/alertmanager tag: v0.24.0 sha: "" @@ -1608,7 +1608,7 @@ prometheusOperator: patch: enabled: true image: - repository: k8s.gcr.io/ingress-nginx/kube-webhook-certgen + repository: ingress-nginx/kube-webhook-certgen tag: v1.1.1 sha: "" pullPolicy: IfNotPresent @@ -1838,7 +1838,7 @@ prometheusOperator: ## Prometheus-operator image ## image: - repository: quay.io/prometheus-operator/prometheus-operator + repository: prometheus-operator/prometheus-operator tag: v0.57.0 sha: "" pullPolicy: IfNotPresent @@ -1856,7 +1856,7 @@ prometheusOperator: prometheusConfigReloader: # image to use for config and rule reloading image: - repository: quay.io/prometheus-operator/prometheus-config-reloader + repository: prometheus-operator/prometheus-config-reloader tag: v0.57.0 sha: "" @@ -1872,7 +1872,7 @@ prometheusOperator: ## Thanos side-car image when configured ## thanosImage: - repository: quay.io/thanos/thanos + repository: thanos/thanos tag: v0.25.2 sha: "" @@ -2297,7 +2297,7 @@ prometheus: ## Image of Prometheus. 
## image: - repository: quay.io/prometheus/prometheus + repository: prometheus/prometheus tag: v2.36.1 sha: "" @@ -3293,7 +3293,7 @@ thanosRuler: ## Image of ThanosRuler ## image: - repository: quay.io/thanos/thanos + repository: thanos/thanos tag: v0.24.0 sha: "" From 5f07c319109fc1edd93cc3b5be89f4197095a8f2 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 3 Jan 2023 10:58:34 -0700 Subject: [PATCH 053/362] Feat/GPE-717 (#2110) * the gitops-sa account was not able to access the argo namespace because the default namespace was not provided for the gitops-sa * Making the namespace dynamic so the gitops-sa can be found properly in any environment * forgot to add the namespace var * accidentally added "local", removed it since this is not a function --- gen3/bin/kube-setup-roles.sh | 6 ++++-- kube/services/jenkins/rolebinding-devops.yaml | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/gen3/bin/kube-setup-roles.sh b/gen3/bin/kube-setup-roles.sh index 040aaca05..c7d484679 100644 --- a/gen3/bin/kube-setup-roles.sh +++ b/gen3/bin/kube-setup-roles.sh @@ -12,6 +12,8 @@ gen3_load "gen3/gen3setup" g3kubectl patch serviceaccount default -p 'automountServiceAccountToken: false' g3kubectl patch serviceaccount --namespace "$(gen3 jupyter j-namespace)" default -p 'automountServiceAccountToken: false' > /dev/null || true +namespace="$(gen3 api namespace)" + # Don't do this in a Jenkins job if [[ -z "$JENKINS_HOME" ]]; then if ! 
g3kubectl get serviceaccounts/useryaml-job > /dev/null 2>&1; then @@ -29,10 +31,10 @@ if [[ -z "$JENKINS_HOME" ]]; then roleName="$(gen3 api safe-name gitops)" gen3 awsrole create "$roleName" gitops-sa # do this here, since we added the new role to this binding - g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml" + g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "namespace: $namespace"|g3kubectl apply -f - fi if ! g3kubectl get rolebindings/devops-binding > /dev/null 2>&1; then - g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml" + g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "namespace: $namespace"|g3kubectl apply -f - fi ctx="$(g3kubectl config current-context)" diff --git a/kube/services/jenkins/rolebinding-devops.yaml b/kube/services/jenkins/rolebinding-devops.yaml index 53ad7d1ed..579da9863 100644 --- a/kube/services/jenkins/rolebinding-devops.yaml +++ b/kube/services/jenkins/rolebinding-devops.yaml @@ -20,7 +20,7 @@ metadata: subjects: - kind: ServiceAccount name: gitops-sa - namespace: default + CURRENT_NAMESPACE apiGroup: "" roleRef: kind: ClusterRole From e36500016bb243f2b111c36c550e105c90cc7736 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 5 Jan 2023 09:27:33 -0700 Subject: [PATCH 054/362] making a few updates to the WAF rules after testing. 
(#2126) --- gen3/bin/waf-rules-GPE-312.json | 101 +++++++++++++++++++++++--------- 1 file changed, 73 insertions(+), 28 deletions(-) diff --git a/gen3/bin/waf-rules-GPE-312.json b/gen3/bin/waf-rules-GPE-312.json index b37eab510..b8cdccabe 100644 --- a/gen3/bin/waf-rules-GPE-312.json +++ b/gen3/bin/waf-rules-GPE-312.json @@ -1,11 +1,19 @@ [ { - "Name": "AWS-AWSManagedRulesBotControlRuleSet", + "Name": "AWS-AWSManagedRulesAdminProtectionRuleSet", "Priority": 0, "Statement": { "ManagedRuleGroupStatement": { "VendorName": "AWS", - "Name": "AWSManagedRulesBotControlRuleSet" + "Name": "AWSManagedRulesAdminProtectionRuleSet", + "RuleActionOverrides": [ + { + "Name": "AdminProtection_URIPATH", + "ActionToUse": { + "Challenge": {} + } + } + ] } }, "OverrideAction": { @@ -14,19 +22,22 @@ "VisibilityConfig": { "SampledRequestsEnabled": true, "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesBotControlRuleSet" + "MetricName": "AWS-AWSManagedRulesAdminProtectionRuleSet" } }, { - "Name": "AWS-AWSManagedRulesAdminProtectionRuleSet", + "Name": "AWS-AWSManagedRulesAmazonIpReputationList", "Priority": 1, "Statement": { "ManagedRuleGroupStatement": { "VendorName": "AWS", - "Name": "AWSManagedRulesAdminProtectionRuleSet", - "ExcludedRules": [ + "Name": "AWSManagedRulesAmazonIpReputationList", + "RuleActionOverrides": [ { - "Name": "AdminProtection_URIPATH" + "Name": "AWSManagedReconnaissanceList", + "ActionToUse": { + "Count": {} + } } ] } @@ -37,16 +48,61 @@ "VisibilityConfig": { "SampledRequestsEnabled": true, "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesAdminProtectionRuleSet" + "MetricName": "AWS-AWSManagedRulesAmazonIpReputationList" } }, { - "Name": "AWS-AWSManagedRulesAmazonIpReputationList", + "Name": "AWS-AWSManagedRulesCommonRuleSet", "Priority": 2, "Statement": { "ManagedRuleGroupStatement": { "VendorName": "AWS", - "Name": "AWSManagedRulesAmazonIpReputationList" + "Name": "AWSManagedRulesCommonRuleSet", + "Version": 
"Version_1.4", + "RuleActionOverrides": [ + { + "Name": "EC2MetaDataSSRF_BODY", + "ActionToUse": { + "Count": {} + } + }, + { + "Name": "GenericLFI_BODY", + "ActionToUse": { + "Allow": {} + } + }, + { + "Name": "SizeRestrictions_QUERYSTRING", + "ActionToUse": { + "Count": {} + } + }, + { + "Name": "SizeRestrictions_BODY", + "ActionToUse": { + "Allow": {} + } + }, + { + "Name": "CrossSiteScripting_BODY", + "ActionToUse": { + "Count": {} + } + }, + { + "Name": "SizeRestrictions_URIPATH", + "ActionToUse": { + "Allow": {} + } + }, + { + "Name": "SizeRestrictions_Cookie_HEADER", + "ActionToUse": { + "Allow": {} + } + } + ] } }, "OverrideAction": { @@ -55,27 +111,16 @@ "VisibilityConfig": { "SampledRequestsEnabled": true, "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesAmazonIpReputationList" + "MetricName": "AWS-AWSManagedRulesCommonRuleSet" } }, { - "Name": "AWS-AWSManagedRulesCommonRuleSet", + "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet", "Priority": 3, "Statement": { "ManagedRuleGroupStatement": { "VendorName": "AWS", - "Name": "AWSManagedRulesCommonRuleSet", - "ExcludedRules": [ - { - "Name": "EC2MetaDataSSRF_BODY" - }, - { - "Name": "GenericLFI_BODY" - }, - { - "Name": "SizeRestrictions_QUERYSTRING" - } - ] + "Name": "AWSManagedRulesKnownBadInputsRuleSet" } }, "OverrideAction": { @@ -84,16 +129,16 @@ "VisibilityConfig": { "SampledRequestsEnabled": true, "CloudWatchMetricsEnabled": true, - "MetricName": "AWS-AWSManagedRulesCommonRuleSet" + "MetricName": "AWS-AWSManagedRulesKnownBadInputsRuleSet" } }, { - "Name": "AWS-AWSManagedRulesKnownBadInputsRuleSet", + "Name": "AWS-AWSManagedRulesLinuxRuleSet", "Priority": 4, "Statement": { "ManagedRuleGroupStatement": { "VendorName": "AWS", - "Name": "AWSManagedRulesKnownBadInputsRuleSet" + "Name": "AWSManagedRulesLinuxRuleSet" } }, "OverrideAction": { @@ -102,7 +147,7 @@ "VisibilityConfig": { "SampledRequestsEnabled": true, "CloudWatchMetricsEnabled": true, - "MetricName": 
"AWS-AWSManagedRulesKnownBadInputsRuleSet" + "MetricName": "AWS-AWSManagedRulesLinuxRuleSet" } } ] \ No newline at end of file From 62e4fef443878b4e0a6f4e21500266e9868dbbf2 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 9 Jan 2023 08:46:05 -0600 Subject: [PATCH 055/362] feat(healthcheck-clear-evicted): Updated health check script to clear Evicted pods (#2125) * feat(healthcheck-clear-evicted): Updated healtcheck script to clear Evicted pods * feat(healthcheck-clear-evicted): Updated healtcheck script to clear Evicted pods Co-authored-by: Edward Malinowski --- gen3/bin/healthcheck.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/gen3/bin/healthcheck.sh b/gen3/bin/healthcheck.sh index b2973aa04..149cb1aaa 100644 --- a/gen3/bin/healthcheck.sh +++ b/gen3/bin/healthcheck.sh @@ -47,7 +47,7 @@ gen3_healthcheck() { # refer to k8s api docs for pod status info # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#podstatus-v1-core gen3_log_info "Getting all pods..." - + local allPods=$(g3kubectl get pods --all-namespaces -o json | \ jq -r '[ .items[] | { @@ -117,7 +117,7 @@ gen3_healthcheck() { if [[ "$statusCode" -lt 200 || "$statusCode" -ge 400 ]]; then internetAccess=false fi - + # check internet access with explicit proxy gen3_log_info "Checking explicit proxy internet access..." local http_proxy="http://cloud-proxy.internal.io:3128" @@ -151,7 +151,7 @@ gen3_healthcheck() { } EOM ) - + if ! jq -r . <<<"$healthJson" > /dev/null; then gen3_log_err "failed to assemble valid json data: $healthJson" return 1 @@ -205,4 +205,10 @@ EOM fi } +clear_evicted_pods() { + g3kubectl get pods -A -o json | jq '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | "kubectl delete pods \(.metadata.name) -n \(.metadata.namespace)"' | xargs -n 1 bash -c 2> /dev/null || true +} + gen3_healthcheck "$@" + +clear_evicted_pods From 3c7fc0e99ded3048ed534ce4303e1ca696ee2e2f Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 9 Jan 2023 16:20:50 -0600 Subject: [PATCH 056/362] Update kubernetes metrics-server (#2129) * Update components.yaml * Update kube-setup-metrics.sh * Update kube-setup-metrics.sh --- gen3/bin/kube-setup-metrics.sh | 4 +- kube/services/metrics-server/components.yaml | 222 +++++++++++-------- 2 files changed, 136 insertions(+), 90 deletions(-) diff --git a/gen3/bin/kube-setup-metrics.sh b/gen3/bin/kube-setup-metrics.sh index ca287197a..139c9679c 100644 --- a/gen3/bin/kube-setup-metrics.sh +++ b/gen3/bin/kube-setup-metrics.sh @@ -17,7 +17,7 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" -DESIRED_VERSION=0.3.7 +DESIRED_VERSION=0.6.2 CURRENT_VERSION=$(kubectl get deployment -n kube-system metrics-server -o json | jq -r .spec.template.spec.containers[0].image | awk -F :v '{print $2}') gen3_metrics_deploy() { @@ -47,4 +47,4 @@ case "$command" in gen3_log_err "unknown option: $command" gen3 help kube-setup-metrics ;; -esac \ No newline at end of file +esac diff --git a/kube/services/metrics-server/components.yaml b/kube/services/metrics-server/components.yaml index 743d61965..dc46ca229 100644 --- a/kube/services/metrics-server/components.yaml +++ b/kube/services/metrics-server/components.yaml @@ -1,22 +1,78 @@ -# Copied contents from here: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.7/components.yaml +# Copied contents from here: https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.2/components.yaml # https://github.com/kubernetes-sigs/metrics-server/releases for more information on installation of a different version. 
--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: system:aggregated-metrics-reader labels: - rbac.authorization.k8s.io/aggregate-to-view: "true" - rbac.authorization.k8s.io/aggregate-to-edit: "true" + k8s-app: metrics-server rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader rules: -- apiGroups: ["metrics.k8s.io"] - resources: ["pods", "nodes"] - verbs: ["get", "list", "watch"] +- apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: +- apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get +- apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: metrics-server + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: + labels: + k8s-app: metrics-server name: metrics-server:system:auth-delegator roleRef: apiGroup: rbac.authorization.k8s.io @@ -28,126 +84,116 @@ subjects: namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding +kind: ClusterRoleBinding metadata: - name: metrics-server-auth-reader - namespace: kube-system + labels: + k8s-app: metrics-server + name: system:metrics-server roleRef: apiGroup: 
rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader + kind: ClusterRole + name: system:metrics-server subjects: - kind: ServiceAccount name: metrics-server namespace: kube-system --- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.metrics.k8s.io -spec: - service: - name: metrics-server - namespace: kube-system - group: metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 ---- apiVersion: v1 -kind: ServiceAccount +kind: Service metadata: + labels: + k8s-app: metrics-server name: metrics-server namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server --- apiVersion: apps/v1 kind: Deployment metadata: - name: metrics-server - namespace: kube-system labels: k8s-app: metrics-server + name: metrics-server + namespace: kube-system spec: selector: matchLabels: k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 template: metadata: - name: metrics-server labels: k8s-app: metrics-server spec: - serviceAccountName: metrics-server - volumes: - # mount in tmp so we can safely use from-scratch images and/or read-only containers - - name: tmp-dir - emptyDir: {} containers: - - name: metrics-server - image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7 + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2 imagePullPolicy: IfNotPresent - args: - - --cert-dir=/tmp - - --secure-port=4443 - - --v=2 + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server ports: - - name: main-port - containerPort: 4443 + - containerPort: 4443 + name: https protocol: TCP + readinessProbe: + 
failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi securityContext: + allowPrivilegeEscalation: false readOnlyRootFilesystem: true runAsNonRoot: true runAsUser: 1000 volumeMounts: - - name: tmp-dir - mountPath: /tmp + - mountPath: /tmp + name: tmp-dir nodeSelector: kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir --- -apiVersion: v1 -kind: Service +apiVersion: apiregistration.k8s.io/v1 +kind: APIService metadata: - name: metrics-server - namespace: kube-system labels: - kubernetes.io/name: "Metrics-server" - kubernetes.io/cluster-service: "true" -spec: - selector: k8s-app: metrics-server - ports: - - port: 443 - protocol: TCP - targetPort: main-port ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:metrics-server -rules: -- apiGroups: - - "" - resources: - - pods - - nodes - - nodes/stats - - namespaces - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:metrics-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:metrics-server -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system \ No newline at end of file + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 From 9c12b180b82c1151437b017d84c011f7fe549e1c Mon Sep 17 00:00:00 2001 From: Sai Shanmukha Narumanchi Date: Wed, 11 Jan 2023 14:10:19 -0600 Subject: [PATCH 057/362] (PXP-9516): Add `metadata-delete-expired-objects` job (#1941) --- gen3/bin/kube-setup-audit-service.sh | 4 +- ...metadata-delete-expired-objects-cronjob.sh 
| 48 +++++++++++++++++++ gen3/bin/kube-setup-metadata.sh | 2 +- gen3/bin/kube-setup-requestor.sh | 4 +- gen3/bin/kube-setup-revproxy.sh | 2 +- .../fence-delete-expired-clients-job.yaml | 2 +- .../metadata-delete-expired-objects-job.yaml | 33 +++++++++++++ .../jobs/opencost-report-argo-job.yaml | 2 +- 8 files changed, 87 insertions(+), 10 deletions(-) create mode 100644 gen3/bin/kube-setup-metadata-delete-expired-objects-cronjob.sh create mode 100644 kube/services/jobs/metadata-delete-expired-objects-job.yaml diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh index 2eebe0f97..b7565194c 100644 --- a/gen3/bin/kube-setup-audit-service.sh +++ b/gen3/bin/kube-setup-audit-service.sh @@ -21,7 +21,7 @@ setup_database_and_config() { # Setup config file that audit-service consumes local secretsFolder="$(gen3_secrets_folder)/g3auto/audit" - if [[ ! -f "$secretsFolder/audit-service-config.yaml" || ! -f "$secretsFolder/base64Authz.txt" ]]; then + if [[ ! -f "$secretsFolder/audit-service-config.yaml" ]]; then if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then if ! gen3 db setup audit; then gen3_log_err "Failed setting up database for audit-service" @@ -60,8 +60,6 @@ DB_USER: $(jq -r .db_username < "$secretsFolder/dbcreds.json") DB_PASSWORD: $(jq -r .db_password < "$secretsFolder/dbcreds.json") DB_DATABASE: $(jq -r .db_database < "$secretsFolder/dbcreds.json") EOM - # make it easy for nginx to get the Authorization header ... - # echo -n "gateway:$password" | base64 > "$secretsFolder/base64Authz.txt" fi gen3 secrets sync 'setup audit-g3auto secrets' } diff --git a/gen3/bin/kube-setup-metadata-delete-expired-objects-cronjob.sh b/gen3/bin/kube-setup-metadata-delete-expired-objects-cronjob.sh new file mode 100644 index 000000000..1879dc8dc --- /dev/null +++ b/gen3/bin/kube-setup-metadata-delete-expired-objects-cronjob.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# +# Deploy the `metadata-delete-expired-objects` cronjob. 
+# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +setup_config() { + gen3_log_info "check metadata-delete-expired-objects secret" + local secretsFolder="$(gen3_secrets_folder)/g3auto/metadata-delete-expired-objects" + if [[ ! -f "$secretsFolder/config.json" ]]; then + local hostname=$(gen3 api hostname) + gen3_log_info "kube-setup-metadata-delete-expired-objects-job" "creating fence oidc client for $hostname" + local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client metadata-delete-expired-objects-job --grant-types client_credentials | tail -1) + # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') + if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then + # try delete client + g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client metadata-delete-expired-objects-job > /dev/null 2>&1 + secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client metadata-delete-expired-objects-job --grant-types client_credentials | tail -1) + if [[ ! 
$secrets =~ (\'(.*)\', \'(.*)\') ]]; then + gen3_log_err "kube-setup-metadata-delete-expired-objects-job" "Failed generating oidc client: $secrets" + return 1 + fi + fi + local client_id="${BASH_REMATCH[2]}" + local client_secret="${BASH_REMATCH[3]}" + + gen3_log_info "create metadata-delete-expired-objects secret" + mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/metadata-delete-expired-objects" + + cat - > "$secretsFolder/config.json" < "$secretsFolder/base64Authz.txt" fi gen3 secrets sync 'setup requestor-g3auto secrets' } diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index bba81166d..02fcc5c38 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -303,4 +303,4 @@ fi if [ "$deployELB" = true ]; then gen3_deploy_revproxy_elb -fi +fi diff --git a/kube/services/jobs/fence-delete-expired-clients-job.yaml b/kube/services/jobs/fence-delete-expired-clients-job.yaml index 1f9a8993b..041b5c2b7 100644 --- a/kube/services/jobs/fence-delete-expired-clients-job.yaml +++ b/kube/services/jobs/fence-delete-expired-clients-job.yaml @@ -20,7 +20,7 @@ spec: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/metadata-delete-expired-objects-job.yaml b/kube/services/jobs/metadata-delete-expired-objects-job.yaml new file mode 100644 index 000000000..221b964a0 --- /dev/null +++ b/kube/services/jobs/metadata-delete-expired-objects-job.yaml @@ -0,0 +1,33 @@ +# Delete all expired MDS objects. +# +# Run `gen3 kube-setup-metadata-delete-expired-objects-job` to configure this job +# and set it up as a cronjob. +# +# Add the job image to the manifest: +# `"metadata-delete-expired-objects": "quay.io/cdis/metadata-delete-expired-objects:master"` +# +# Once set up, the job can be run with `gen3 job run metadata-delete-expired-objects-job`. 
+ +apiVersion: batch/v1 +kind: Job +metadata: + name: metadata-delete-expired-objects +spec: + template: + metadata: + labels: + app: gen3job + spec: + volumes: + - name: config-volume + secret: + secretName: "metadata-delete-expired-objects-g3auto" + containers: + - name: metadata-delete-expired-objects + GEN3_METADATA-DELETE-EXPIRED-OBJECTS_IMAGE + imagePullPolicy: Always + volumeMounts: + - name: config-volume + readOnly: true + mountPath: /mnt + restartPolicy: Never diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml index 4b28e9e1c..26fbbae60 100644 --- a/kube/services/jobs/opencost-report-argo-job.yaml +++ b/kube/services/jobs/opencost-report-argo-job.yaml @@ -39,7 +39,7 @@ spec: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: reports-service-account containers: - name: send-report From 1b2146ada27f0ed2c51d8df9ef9a5c8443d7ea9f Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 13 Jan 2023 12:41:20 -0600 Subject: [PATCH 058/362] Feat(cloud-init-fips): Set FIPs through cloud-init (#2134) * feat(cloud-init-fips): Set FIPs through cloud-init * feat(cloud-init-fips): Set FIPs through cloud-init Co-authored-by: Edward Malinowski Co-authored-by: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> --- .../eks/bootstrap-explicit-proxy-docker.sh | 24 +++++++++++++++++++ .../eks/bootstrap-with-security-updates.sh | 24 +++++++++++++++++++ flavors/eks/bootstrap.sh | 24 +++++++++++++++++++ 3 files changed, 72 insertions(+) diff --git a/flavors/eks/bootstrap-explicit-proxy-docker.sh b/flavors/eks/bootstrap-explicit-proxy-docker.sh index 13d181d03..091be1b18 100644 --- a/flavors/eks/bootstrap-explicit-proxy-docker.sh +++ b/flavors/eks/bootstrap-explicit-proxy-docker.sh @@ -1,3 +1,9 @@ +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="BOUNDARY" + +--BOUNDARY +Content-Type: text/x-shellscript; charset="us-ascii" + #!/bin/bash -xe # 
User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images @@ -52,3 +58,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! -z "${customer_id}" ]]; then rm qualys-cloud-agent.x86_64.rpm sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id} fi + +sudo yum update -y +sudo yum install -y dracut-fips openssl >> /opt/fips-install.log +sudo dracut -f +# configure grub +sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + +--BOUNDARY +Content-Type: text/cloud-config; charset="us-ascii" + +power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + +--BOUNDARY-- \ No newline at end of file diff --git a/flavors/eks/bootstrap-with-security-updates.sh b/flavors/eks/bootstrap-with-security-updates.sh index 1e6a0b7eb..06d962f55 100644 --- a/flavors/eks/bootstrap-with-security-updates.sh +++ b/flavors/eks/bootstrap-with-security-updates.sh @@ -1,3 +1,9 @@ +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="BOUNDARY" + +--BOUNDARY +Content-Type: text/x-shellscript; charset="us-ascii" + #!/bin/bash -xe # User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images @@ -76,3 +82,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! 
-z "${customer_id}" ]]; then rm qualys-cloud-agent.x86_64.rpm sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id} fi + +sudo yum update -y +sudo yum install -y dracut-fips openssl >> /opt/fips-install.log +sudo dracut -f +# configure grub +sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + +--BOUNDARY +Content-Type: text/cloud-config; charset="us-ascii" + +power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + +--BOUNDARY-- \ No newline at end of file diff --git a/flavors/eks/bootstrap.sh b/flavors/eks/bootstrap.sh index f5dbcf55e..7dda384d7 100644 --- a/flavors/eks/bootstrap.sh +++ b/flavors/eks/bootstrap.sh @@ -1,3 +1,9 @@ +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="BOUNDARY" + +--BOUNDARY +Content-Type: text/x-shellscript; charset="us-ascii" + #!/bin/bash -xe # User data for our EKS worker nodes basic arguments to call the bootstrap script for EKS images @@ -25,3 +31,21 @@ if [[ ! -z "${activation_id}" ]] || [[ ! 
-z "${customer_id}" ]]; then rm qualys-cloud-agent.x86_64.rpm sudo /usr/local/qualys/cloud-agent/bin/qualys-cloud-agent.sh ActivationId=${activation_id} CustomerId=${customer_id} fi + +sudo yum update -y +sudo yum install -y dracut-fips openssl >> /opt/fips-install.log +sudo dracut -f +# configure grub +sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + +--BOUNDARY +Content-Type: text/cloud-config; charset="us-ascii" + +power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + +--BOUNDARY-- \ No newline at end of file From 219d76f0b7fa5350cf5d59abaccc27a0dbbde614 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 23 Jan 2023 07:05:24 -0600 Subject: [PATCH 059/362] feat(squid-on-arm): Added gh action for squid build so that squid could be build for arm (#2136) Co-authored-by: Edward Malinowski --- .github/workflows/image_build_push_squid.yaml | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/image_build_push_squid.yaml diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml new file mode 100644 index 000000000..2849f0cc5 --- /dev/null +++ b/.github/workflows/image_build_push_squid.yaml @@ -0,0 +1,21 @@ +name: Build Squid images and push to Quay + +on: + push: + paths: + - Docker/squid/** + +jobs: + squid: + name: Squid Build and Push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/squid/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/squid" + OVERRIDE_REPO_NAME: "squid" + USE_QUAY_ONLY: true + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} From 8f7e177bc6419792d190c752e38e713fba607c3d Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 24 Jan 2023 10:35:08 -0600 
Subject: [PATCH 060/362] =?UTF-8?q?feat(karpenter):=20Added=20configuratio?= =?UTF-8?q?n=20to=20use=20karpenter=20instead=20of=20clus=E2=80=A6=20(#212?= =?UTF-8?q?8)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(karpenter): Added configuration to use karpenter instead of cluster autoscaler * feat(karpenter): Added configuration to use karpenter instead of cluster autoscaler * feat(karpenter): Added configuration to use karpenter instead of cluster autoscaler * feat(karpenter): Added configuration to use karpenter instead of cluster autoscaler * feat(karpenter): Added configuration to use karpenter instead of cluster autoscaler Co-authored-by: Edward Malinowski Co-authored-by: Edward Malinowski --- gen3/bin/kube-roll-all.sh | 12 +- gen3/bin/kube-setup-autoscaler.sh | 34 ++++- gen3/bin/kube-setup-karpenter.sh | 138 ++++++++++++++++++ gen3/bin/kube-setup-workvm.sh | 2 +- kube/services/karpenter/binfmt.yaml | 42 ++++++ .../karpenter/nodeTemplateDefault.yaml | 51 +++++++ .../karpenter/nodeTemplateJupyter.yaml | 50 +++++++ .../karpenter/nodeTemplateWorkflow.yaml | 50 +++++++ kube/services/karpenter/provisionerArm.yaml | 35 +++++ .../karpenter/provisionerDefault.yaml | 35 +++++ .../karpenter/provisionerJupyter.yaml | 40 +++++ .../karpenter/provisionerWorkflow.yaml | 35 +++++ 12 files changed, 518 insertions(+), 6 deletions(-) create mode 100644 gen3/bin/kube-setup-karpenter.sh create mode 100644 kube/services/karpenter/binfmt.yaml create mode 100644 kube/services/karpenter/nodeTemplateDefault.yaml create mode 100644 kube/services/karpenter/nodeTemplateJupyter.yaml create mode 100644 kube/services/karpenter/nodeTemplateWorkflow.yaml create mode 100644 kube/services/karpenter/provisionerArm.yaml create mode 100644 kube/services/karpenter/provisionerDefault.yaml create mode 100644 kube/services/karpenter/provisionerJupyter.yaml create mode 100644 kube/services/karpenter/provisionerWorkflow.yaml diff --git 
a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index d93ac7600..70af01b36 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -248,7 +248,17 @@ gen3 kube-setup-revproxy if [[ "$GEN3_ROLL_FAST" != "true" ]]; then # Internal k8s systems gen3 kube-setup-fluentd & - gen3 kube-setup-autoscaler & + # If there is an entry for karpenter in the manifest setup karpenter + if g3k_manifest_lookup .global.karpenter 2> /dev/null; then + if [[ "$(g3k_manifest_lookup .global.karpenter)" != "arm" ]]; then + gen3 kube-setup-karpenter deploy & + else + gen3 kube-setup-karpenter deploy --arm & + fi + # Otherwise, setup the cluster autoscaler + else + gen3 kube-setup-autoscaler & + fi gen3 kube-setup-kube-dns-autoscaler & gen3 kube-setup-metrics deploy || true gen3 kube-setup-tiller || true diff --git a/gen3/bin/kube-setup-autoscaler.sh b/gen3/bin/kube-setup-autoscaler.sh index 01a6cdd95..16ff0439b 100644 --- a/gen3/bin/kube-setup-autoscaler.sh +++ b/gen3/bin/kube-setup-autoscaler.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# cluster-autoscaler allow a kubernetes cluste scale out or in depending on the +# cluster-autoscaler allow a kubernetes cluste scale out or in depending on the # specification set in deployment. It'll talk to the ASG where the worker nodes are # and send a signal to add or remove instances based upon requirements. # @@ -82,18 +82,36 @@ function deploy() { } +function remove() { + + if ( g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1); then + if ! 
[ -z ${CAS_VERSION} ]; + then + casv=${CAS_VERSION} + else + casv="$(get_autoscaler_version)" # cas stands for ClusterAutoScaler + fi + echo "Removing cluster autoscaler ${casv} in ${vpc_name}" + g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" delete -f - + else + echo "kube-setup-autoscaler exiting - cluster-autoscaler not deployed" + fi + +} + function HELP(){ - echo "Usage: $SCRIPT [-v] [-f] " + echo "Usage: $SCRIPT [-v] [-f] [-r]" echo "Options:" echo "No option is mandatory, however you can provide the following:" echo " -v num --version num --create=num Cluster autoscaler version number" echo " -f --force Force and update if it is already installed" + echo " -r --remove remove deployment if already installed" } #echo $(get_autoscaler_version) -OPTSPEC="hfv:-:" +OPTSPEC="hfvr:-:" while getopts "$OPTSPEC" optchar; do case "${optchar}" in -) @@ -107,6 +125,10 @@ while getopts "$OPTSPEC" optchar; do version=*) CAS_VERSION=${OPTARG#*=} ;; + remove) + remove + exit 0 + ;; *) if [ "$OPTERR" = 1 ] && [ "${OPTSPEC:0:1}" != ":" ]; then echo "Unknown option --${OPTARG}" >&2 @@ -121,6 +143,10 @@ while getopts "$OPTSPEC" optchar; do v) CAS_VERSION=${OPTARG} ;; + r) + remove + exit 0 + ;; *) if [ "$OPTERR" != 1 ] || [ "${OPTSPEC:0:1}" = ":" ]; then echo "Non-option argument: '-${OPTARG}'" >&2 @@ -131,4 +157,4 @@ while getopts "$OPTSPEC" optchar; do esac done -deploy +deploy \ No newline at end of file diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh new file mode 100644 index 000000000..a577d5671 --- /dev/null +++ b/gen3/bin/kube-setup-karpenter.sh @@ -0,0 +1,138 @@ +#!/bin/bash + +#set -i + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + + +gen3_deploy_karpenter() { + # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy + if [[ -z $(g3kubectl get namespaces | 
grep karpenter) ]] || [[ $FORCE ]]; then + # Ensure the spot instance service linked role is setup + # It is required for running spot instances + aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true + karpenter=${karpenter:-v0.22.0} + echo '{ + "Statement": [ + { + "Action": [ + "ssm:GetParameter", + "iam:PassRole", + "ec2:DescribeImages", + "ec2:RunInstances", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeAvailabilityZones", + "ec2:DeleteLaunchTemplate", + "ec2:CreateTags", + "ec2:CreateLaunchTemplate", + "ec2:CreateFleet", + "ec2:DescribeSpotPriceHistory", + "pricing:GetProducts" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "Karpenter" + }, + { + "Action": "ec2:TerminateInstances", + "Condition": { + "StringLike": { + "ec2:ResourceTag/Name": "*karpenter*" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "ConditionalEC2Termination" + } + ], + "Version": "2012-10-17" + }' > controller-policy.json + + g3kubectl create namespace karpenter 2> /dev/null || true + gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true + # Have to delete SA because helm chart will create the SA and there will be a conflict + g3kubectl delete sa karpenter -n karpenter + aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true + # Need to tag the subnets/sg's so that karpenter can discover them automatically + subnets=$(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_private_*' --query 'Subnets[].SubnetId' --output text) + security_groups=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' 
--output text) + security_groups_jupyter=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-jupyter,ssh_eks_'$vpc_name'-nodepool-jupyter' --query 'SecurityGroups[].GroupId' --output text) + security_groups_workflow=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-workflow,ssh_eks_'$vpc_name'-nodepool-workflow' --query 'SecurityGroups[].GroupId' --output text) + cluster_endpoint="$(aws eks describe-cluster --name ${vpc_name} --query "cluster.endpoint" --output text)" + + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-worfklow" --resources ${security_groups_workflow} + + helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter \ + --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ + --set settings.aws.clusterEndpoint="${cluster_endpoint}" \ + --set settings.aws.clusterName=${vpc_name} \ + --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::$(aws sts get-caller-identity --output text --query 'Account'):role/gen3_service/karpenter-controller-role-${vpc_name}" + # sleep for a little bit so CRD's can be created for the provisioner/node template + sleep 10 + # Deploy AWS node termination handler so that spot instances can be preemptively spun up before old instances stop + kubectl apply -f https://github.com/aws/aws-node-termination-handler/releases/download/v1.18.1/all-resources.yaml + fi + gen3 kube-setup-autoscaler --remove + g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - + 
g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - + g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateWorkflow.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - + if [[ $ARM ]]; then + # Deploy binfmt daemonset so the emulation tools run on arm nodes + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/binfmt.yaml + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerArm.yaml + else + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerDefault.yaml + fi + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerJupyter.yaml + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerWorkflow.yaml +} + +gen3_remove_karpenter() { + aws iam delete-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-name "karpenter-controller-policy" 1>&2 || true + aws iam delete-role --role-name "karpenter-controller-role-$vpc_name" + helm uninstall karpenter -n karpenter + g3kubectl delete namespace karpenter + gen3 kube-setup-autoscaler +} + +#---------- main + +if [[ -z "$GEN3_SOURCE_ONLY" ]]; then + # Support sourcing this file for test suite + command="$1" + shift + case "$command" in + "deploy") + for flag in $@; do + if [[ $# -gt 0 ]]; then + flag="$1" + shift + fi + case "$flag" in + "--force") + FORCE=true + ;; + "--arm") + ARM=true + ;; + esac + done + gen3_deploy_karpenter + ;; + "remove") + gen3_remove_karpenter + ;; + *) + gen3_deploy_karpenter + ;; + esac +fi diff --git a/gen3/bin/kube-setup-workvm.sh b/gen3/bin/kube-setup-workvm.sh index fd4d9206e..53424f89d 100644 --- a/gen3/bin/kube-setup-workvm.sh +++ b/gen3/bin/kube-setup-workvm.sh @@ -241,7 +241,7 @@ EOM fi ( # in a subshell install helm install_helm() { - helm_release_URL="https://get.helm.sh/helm-v3.4.0-linux-amd64.tar.gz" + helm_release_URL="https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz" curl -s -o "${XDG_RUNTIME_DIR}/helm.tar.gz" ${helm_release_URL} tar 
xf "${XDG_RUNTIME_DIR}/helm.tar.gz" -C ${XDG_RUNTIME_DIR} sudo mv -f "${XDG_RUNTIME_DIR}/linux-amd64/helm" /usr/local/bin diff --git a/kube/services/karpenter/binfmt.yaml b/kube/services/karpenter/binfmt.yaml new file mode 100644 index 000000000..35cf5b559 --- /dev/null +++ b/kube/services/karpenter/binfmt.yaml @@ -0,0 +1,42 @@ +# Run binfmt setup on any new node +# https://kubernetes.io/docs/concepts/workloads/controllers/daemonset +# https://github.com/docker/buildx/issues/342#issuecomment-680715762 +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: binfmt + # namespace: kube-system + labels: + app: binfmt-setup +spec: + selector: + matchLabels: + name: binfmt + # https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates + template: + metadata: + labels: + name: binfmt + spec: + nodeSelector: + kubernetes.io/arch: "arm64" + initContainers: + - name: binfmt + image: tonistiigi/binfmt + # command: [] + args: ["--install", "all"] + # Run the container with the privileged flag + # https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + # https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#securitycontext-v1-core + securityContext: + privileged: true + containers: + - name: pause + image: gcr.io/google_containers/pause:3.2 + resources: + limits: + cpu: 50m + memory: 50Mi + requests: + cpu: 50m + memory: 50Mi diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml new file mode 100644 index 000000000..2026f8dfa --- /dev/null +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -0,0 +1,51 @@ +apiVersion: karpenter.k8s.aws/v1alpha1 +kind: AWSNodeTemplate +metadata: + name: default +spec: + subnetSelector: + karpenter.sh/discovery: VPC_NAME + securityGroupSelector: + karpenter.sh/discovery: VPC_NAME + tags: + karpenter.sh/discovery: VPC_NAME + Environment: VPC_NAME + Name: eks-VPC_NAME-karpenter + userData: | + MIME-Version: 1.0 + Content-Type: 
multipart/mixed; boundary="BOUNDARY" + + --BOUNDARY + Content-Type: text/x-shellscript; charset="us-ascii" + + #!/bin/bash -xe + + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + sysctl -w fs.inotify.max_user_watches=12000 + + sudo yum update -y + sudo yum install -y dracut-fips openssl >> /opt/fips-install.log + sudo dracut -f + # configure grub + sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" + + power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + + + --BOUNDARY-- + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + volumeSize: 50Gi + volumeType: gp2 + encrypted: true + deleteOnTermination: true diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml new file mode 100644 index 000000000..629eac24e --- /dev/null +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -0,0 +1,50 @@ +apiVersion: karpenter.k8s.aws/v1alpha1 +kind: AWSNodeTemplate +metadata: + name: jupyter +spec: + subnetSelector: + karpenter.sh/discovery: VPC_NAME + securityGroupSelector: + karpenter.sh/discovery: VPC_NAME-jupyter + tags: + Environment: VPC_NAME + Name: eks-VPC_NAME-jupyter-karpenter + karpenter.sh/discovery: VPC_NAME + userData: | + MIME-Version: 1.0 + Content-Type: multipart/mixed; boundary="BOUNDARY" + + --BOUNDARY + Content-Type: text/x-shellscript; charset="us-ascii" + + #!/bin/bash -xe + + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + sysctl -w fs.inotify.max_user_watches=12000 + + sudo yum update -y + sudo yum install -y dracut-fips openssl >> /opt/fips-install.log + sudo dracut -f + # configure grub + sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + + --BOUNDARY + Content-Type: 
text/cloud-config; charset="us-ascii" + + power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + + --BOUNDARY-- + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + volumeSize: 50Gi + volumeType: gp2 + encrypted: true + deleteOnTermination: true diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml new file mode 100644 index 000000000..7e0cbf481 --- /dev/null +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -0,0 +1,50 @@ +apiVersion: karpenter.k8s.aws/v1alpha1 +kind: AWSNodeTemplate +metadata: + name: workflow +spec: + subnetSelector: + karpenter.sh/discovery: VPC_NAME + securityGroupSelector: + karpenter.sh/discovery: VPC_NAME-workflow + tags: + Environment: VPC_NAME + Name: eks-VPC_NAME-workflow-karpenter + karpenter.sh/discovery: VPC_NAME + userData: | + MIME-Version: 1.0 + Content-Type: multipart/mixed; boundary="BOUNDARY" + + --BOUNDARY + Content-Type: text/x-shellscript; charset="us-ascii" + + #!/bin/bash -xe + + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + sysctl -w fs.inotify.max_user_watches=12000 + + sudo yum update -y + sudo yum install -y dracut-fips openssl >> /opt/fips-install.log + sudo dracut -f + # configure grub + sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" + + power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + + --BOUNDARY-- + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + volumeSize: 50Gi + volumeType: gp2 + encrypted: true + deleteOnTermination: true diff --git a/kube/services/karpenter/provisionerArm.yaml b/kube/services/karpenter/provisionerArm.yaml new file mode 100644 index 000000000..2f53581a2 --- /dev/null +++ b/kube/services/karpenter/provisionerArm.yaml @@ -0,0 +1,35 @@ +apiVersion: 
karpenter.sh/v1alpha5 +kind: Provisioner +metadata: + name: default +spec: + # Allow for spot and on demand instances + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: ["on-demand", "spot"] + - key: kubernetes.io/arch + operator: In + values: + - arm64 + - amd64 + - key: karpenter.k8s.aws/instance-category + operator: In + values: + - c + - m + - r + - t + # Set a limit of 1000 vcpus + limits: + resources: + cpu: 1000 + # Use the default node template + providerRef: + name: default + # Allow pods to be rearranged + consolidation: + enabled: true + # Kill nodes after 30 days to ensure they stay up to date + ttlSecondsUntilExpired: 2592000 + diff --git a/kube/services/karpenter/provisionerDefault.yaml b/kube/services/karpenter/provisionerDefault.yaml new file mode 100644 index 000000000..ac08284ce --- /dev/null +++ b/kube/services/karpenter/provisionerDefault.yaml @@ -0,0 +1,35 @@ +apiVersion: karpenter.sh/v1alpha5 +kind: Provisioner +metadata: + name: default +spec: + # Allow for spot and on demand instances + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: ["on-demand", "spot"] + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: karpenter.k8s.aws/instance-category + operator: In + values: + - c + - m + - r + - t + # Set a limit of 1000 vcpus + limits: + resources: + cpu: 1000 + # Use the default node template + providerRef: + name: default + # Allow pods to be rearranged + consolidation: + enabled: true + # Kill nodes after 30 days to ensure they stay up to date + ttlSecondsUntilExpired: 2592000 + + diff --git a/kube/services/karpenter/provisionerJupyter.yaml b/kube/services/karpenter/provisionerJupyter.yaml new file mode 100644 index 000000000..0d4b1c85e --- /dev/null +++ b/kube/services/karpenter/provisionerJupyter.yaml @@ -0,0 +1,40 @@ +apiVersion: karpenter.sh/v1alpha5 +kind: Provisioner +metadata: + name: jupyter +spec: + # Only allow on demand instance + requirements: + - key: 
karpenter.sh/capacity-type + operator: In + values: ["on-demand"] + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: karpenter.k8s.aws/instance-category + operator: In + values: + - c + - m + - r + - t + # Set a taint for jupyter pods + taints: + - key: role + value: jupyter + effect: NoSchedule + labels: + role: jupyter + # Set a limit of 1000 vcpus + limits: + resources: + cpu: 1000 + # Use the jupyter node template + providerRef: + name: jupyter + # Allow pods to be rearranged + consolidation: + enabled: true + # Kill nodes after 30 days to ensure they stay up to date + ttlSecondsUntilExpired: 2592000 diff --git a/kube/services/karpenter/provisionerWorkflow.yaml b/kube/services/karpenter/provisionerWorkflow.yaml new file mode 100644 index 000000000..a66a14707 --- /dev/null +++ b/kube/services/karpenter/provisionerWorkflow.yaml @@ -0,0 +1,35 @@ +apiVersion: karpenter.sh/v1alpha5 +kind: Provisioner +metadata: + name: workflow +spec: + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: ["on-demand"] + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: karpenter.k8s.aws/instance-category + operator: In + values: + - c + - m + - r + - t + taints: + - key: role + value: workflow + effect: NoSchedule + limits: + resources: + cpu: 1000 + providerRef: + name: workflow + # Allow pods to be rearranged + consolidation: + enabled: true + # Kill nodes after 30 days to ensure they stay up to date + ttlSecondsUntilExpired: 2592000 + From afb750d752f1324c2884da1efaef3cec8f9476b9 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 24 Jan 2023 12:32:59 -0700 Subject: [PATCH 061/362] Feat/eks 1.22 (#2135) * adding a new autoscaler version for 1.22 * updating the image versions for system services to be compatible with EKS 1.22 * modifying cluster roles for the cluster autoscaler and metrics server to be compatible with the new image versions --- 
gen3/bin/kube-setup-autoscaler.sh | 3 +++ gen3/bin/kube-setup-system-services.sh | 8 ++++---- .../autoscaler/cluster-autoscaler-autodiscover.yaml | 1 + kube/services/metrics-server/components.yaml | 3 +++ 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-autoscaler.sh b/gen3/bin/kube-setup-autoscaler.sh index 16ff0439b..b3659a2be 100644 --- a/gen3/bin/kube-setup-autoscaler.sh +++ b/gen3/bin/kube-setup-autoscaler.sh @@ -30,6 +30,9 @@ function get_autoscaler_version(){ local casv case ${k8s_version} in + "1.22+") + casv="v1.22.2" + ;; "1.21+") casv="v1.21.2" ;; diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index 7a75a33f8..34ae87436 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -16,10 +16,10 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" -kubeproxy=${kubeproxy:-1.16.13} -coredns=${coredns:-1.6.6} +kubeproxy=${kubeproxy:-1.22.11} +coredns=${coredns:-1.8.7} kubednsautoscaler=${kubednsautoscaler:-1.8.6} -cni=${cni:-1.11.0} +cni=${cni:-1.12.0} calico=${calico:-1.7.8} @@ -31,7 +31,7 @@ while [ $# -gt 0 ]; do shift done -kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.1" +kube_proxy_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/kube-proxy:v${kubeproxy}-eksbuild.2" coredns_image="602401143452.dkr.ecr.us-east-1.amazonaws.com/eks/coredns:v${coredns}" kubednsautoscaler_image="k8s.gcr.io/cpa/cluster-proportional-autoscaler:${kubednsautoscaler}" cni_image="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${cni}/config/master/aws-k8s-cni.yaml" diff --git a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml index c863a67f7..2e1b94fcd 100644 --- a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml +++ b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml @@ -35,6 
+35,7 @@ rules: - apiGroups: [""] resources: - "pods" + - "namespaces" - "services" - "replicationcontrollers" - "persistentvolumeclaims" diff --git a/kube/services/metrics-server/components.yaml b/kube/services/metrics-server/components.yaml index dc46ca229..a683ca0d6 100644 --- a/kube/services/metrics-server/components.yaml +++ b/kube/services/metrics-server/components.yaml @@ -40,6 +40,7 @@ rules: - "" resources: - nodes/metrics + - nodes/stats verbs: - get - apiGroups: @@ -47,6 +48,8 @@ rules: resources: - pods - nodes + - namespaces + - configmaps verbs: - get - list From 56cda3f182eb1aa9ea0a38b46d530de385278f7a Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 30 Jan 2023 08:37:28 -0600 Subject: [PATCH 062/362] feat(argo-multi-ns): Setup argo buckets in every ns (#2089) * feat(argo-multi-ns): Setup argo buckets in every ns * feat(argo-multi-ns): Setup argo buckets in every ns * feat(argo-multi-ns): Setup argo buckets in every ns --------- Co-authored-by: Edward Malinowski --- .secrets.baseline | 2 +- gen3/bin/kube-setup-argo.sh | 26 +++++++++++++++++++++----- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 4c82e4e42..7b92b4044 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2022-12-16T20:29:01Z", + "generated_at": "2023-01-23T11:54:50Z", "plugins_used": [ { "name": "AWSKeyDetector" diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index 99728efaa..dbd46edaa 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -21,7 +21,7 @@ function setup_argo_buckets { gen3_log_err "could not determine account numer" return 1 fi - if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then + if ! 
environment="$(g3k_environment)"; then gen3_log_err "could not determine environment from manifest-global - bailing out of argo setup" return 1 fi @@ -155,10 +155,26 @@ EOF fi gen3_log_info "Creating s3 creds secret in argo namespace" - if [[ -z $internalBucketName ]]; then - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} + if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then + if [[ -z $internalBucketName ]]; then + g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} + + else + g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} + fi else - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} 
--from-literal=internalbucketname=${internalBucketName} + g3kubectl create sa argo || true + # Grant admin access within the current namespace to the argo SA in the current namespace + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$(gen3 db namespace):argo -n $(gen3 db namespace) || true + aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile + if [[ -z $internalBucketName ]]; then + aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} + else + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} + fi fi @@ -207,9 +223,9 @@ function setup_argo_db() { fi } + setup_argo_buckets # only do this if we are running in the default namespace if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then - setup_argo_buckets setup_argo_db if (! 
helm status argo -n argo > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then DBHOST=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_host | base64 -d) From 67c498865c3e4464e30f68eca67b5064a938731c Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 30 Jan 2023 09:12:39 -0600 Subject: [PATCH 063/362] feat(karpenter-doc): Added documentation for karpenter (#2139) Co-authored-by: Edward Malinowski --- doc/karpenter.md | 59 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 doc/karpenter.md diff --git a/doc/karpenter.md b/doc/karpenter.md new file mode 100644 index 000000000..29aa35de1 --- /dev/null +++ b/doc/karpenter.md @@ -0,0 +1,59 @@ +# Introduction + +Karpenter is a modern cloud-native tool for Kubernetes cluster management and resource allocation. With its efficient and customizable scaling and orchestration capabilities, Karpenter is becoming an increasingly popular alternative to Cluster Autoscaler. In this document, we will discuss the benefits of using Karpenter over Cluster Autoscaler and why it is worth considering a switch. + +# Table of contents + +- [1. Benefits of Karpenter](#benefits-of-karpenter) +- [2. Requirements](#requirements) +- [3. How it Works](#how-it-works) +- [4. Installation Steps](#installation-steps) +- [5. Modifying the Provisioners and Awsnodetemplates](#modifying-the-provisioners-and-awsnodetemplates) +- [6. Potential Issues](#potential-issues) + +## Benefits of Karpenter + +- Advanced Resource Allocation: Karpenter provides fine-tuned control over resource allocation, allowing for greater optimization of resource utilization. With its advanced features, Karpenter can ensure that nodes are appropriately sized and allocated, reducing the chance of overprovisioning or underutilization. +- Scalability: Karpenter offers powerful scaling capabilities, allowing administrators to quickly and efficiently adjust the size of their cluster as needed. 
With its sophisticated scaling algorithms, Karpenter ensures that resources are optimized and that clusters are able to grow and shrink as needed. +- Customizable: Karpenter allows administrators to customize and configure their cluster as needed. With its flexible and intuitive interface, administrators can easily adjust the size and composition of their cluster to meet the specific needs of their organization. +- Efficient Management: Karpenter provides efficient and streamlined cluster management, allowing administrators to manage their resources more effectively. With its intuitive and powerful interface, administrators can easily allocate resources and monitor cluster performance, ensuring that their cluster is running smoothly and efficiently. + +## Requirements + +Karpenter requires access to AWS to be able to provision EC2 instances. It uses an EKS IAM service account with access to most EC2 resources. Once Karpenter is deployed it also requires configuration to decide which node types to spin up, described in the next section. Our base configuration relies on config provisioned using our terraform though, so it may require manual effort to install if not using our terraform. Last, since Karpenter is going to be the new cluster management system, we will need to uninstall the cluster autoscaler. + +## How it Works + +Karpenter works on the EKS level instead of the cloud level. This means the systems in place to configure which nodes to spin up are shifted from AWS to EKS configuration. Karpenter uses provisioners to replace autoscaling groups and awsnodetemplates to replace launch configs/templates. Once deployed you will need to create at least one provisioner and one awsnodetemplate so that karpenter can decide what nodes to spin up and once pods require new nodes to spin up karpenter will figure out the most efficient instance type to use based on the pod resources and allowed instance types specified within your provisioner/templates. 
+ +## Installation Steps + +To install Karpenter using gen3 you can simply run the kube-setup-karpenter script. This script does the following to install karpenter. + +1. Creates a new karpenter namespace for the karpenter deployment to run in. +2. Creates an EKS IAM service account with access to EC2 resources within AWS for the Karpenter deployment to use. +3. Tags the relevent subnets and security groups for the karpenter deployment to autodiscover. +4. Installs the karpenter helm deployment +5. Installs the necessary provisioners and aws node templates. + +This can also be installed through the manifest by adding a .global.karpenter block to your manifest. If this block equals "arm" then it will also install the arm provisioner, which will provision arm based nodes for the default worker nodes. + +## Modifying the Provisioners and Awsnodetemplates + +If you ever need to change the behavior of the provisioners on the fly you can run the following command + +```bash +kubectl edit provisioners.karpenter.sh +``` + +If you ever need to edit the awsnodetemplate you can do so with + +```bash +kubectl edit awsnodetemplates.karpenter.k8s.aws +``` + +Base configuration lives in the [karpenter configration section](https://github.com/uc-cdis/cloud-automation/tree/master/kube/services/karpenter) of cloud-automation so you can edit this configuration for longer term or more widespread changes. + +## Potential Issues + +Karpenter is a powerful flexible tool, but with that can come some challenges. The first is Karpenter needs to be able to find subnets/security groups for your specific VPC. If there are multiple VPC's in an AWS account and multiple Karpenter deployments, we need to stray from the official Karpenter documentation when tagging subnets/security groups. 
Karpenter will find subnets/security groups tagged a certain way, so instead of setting the tag to be true for karpenter discovery we should set the value to be the VPC name, and similarly set it to be the VPC name within the karpenter configuration. Also, karpenter requires at least 2 nodes outside of any nodes it manages for it's deployment to run on. This is so that karpenter is always available and can schedule nodes without taking itself out. Because of this, we recommend running a regular EKS worker ASG with 2 min/max/desired for karpenter to run on. If these nodes ever need to be updated you will need to ensure karpenter comes back up after to ensure your cluster scales as intended. From bbd23e341c36c0a1e60bdd32ef867fc59aed193b Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 2 Feb 2023 08:30:18 -0700 Subject: [PATCH 064/362] editing the kube-setup-argo script to prevent it from deleting the s3 cred secret if run a non-default ns since it won't be re-created in that case. (#2141) --- .secrets.baseline | 4 ++-- gen3/bin/kube-setup-argo.sh | 31 ++++++++++++++++--------------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 7b92b4044..baa9cc4fb 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-01-23T11:54:50Z", + "generated_at": "2023-02-01T22:29:59Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -338,7 +338,7 @@ "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", "is_secret": false, "is_verified": false, - "line_number": 200, + "line_number": 217, "type": "Secret Keyword" } ], diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index dbd46edaa..881638808 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -126,7 +126,7 @@ EOF gen3_log_info "Creating IAM user ${userName}" if ! 
aws iam get-user --user-name ${userName} > /dev/null 2>&1; then - aws iam create-user --user-name ${userName} + aws iam create-user --user-name ${userName} || true else gen3_log_info "IAM user ${userName} already exits.." fi @@ -134,9 +134,9 @@ EOF secret=$(aws iam create-access-key --user-name ${userName}) if ! g3kubectl get namespace argo > /dev/null 2>&1; then gen3_log_info "Creating argo namespace" - g3kubectl create namespace argo - g3kubectl label namespace argo app=argo - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo + g3kubectl create namespace argo || true + g3kubectl label namespace argo app=argo || true + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo || true fi else # Else we want to recreate the argo-s3-creds secret so make a temp file with the current creds and delete argo-s3-creds secret @@ -151,29 +151,30 @@ EOF } EOF secret=$(cat $secretFile) - g3kubectl delete secret -n argo argo-s3-creds fi gen3_log_info "Creating s3 creds secret in argo namespace" if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then if [[ -z $internalBucketName ]]; then - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} - + g3kubectl delete secret -n argo argo-s3-creds || true + g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) 
--from-literal=bucketname=${bucketName} || true + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true else - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} + g3kubectl delete secret -n argo argo-s3-creds || true + g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} || true + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true fi else g3kubectl create sa argo || true # Grant admin access within the current namespace to the argo SA in the current namespace g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$(gen3 db namespace):argo -n $(gen3 db namespace) || true - aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile + aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile || true if 
[[ -z $internalBucketName ]]; then - aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} + aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile || true + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true else - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} + g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} || true + fi fi @@ -248,4 +249,4 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi else gen3_log_info "kube-setup-argo exiting - only deploys from default namespace" -fi +fi \ No newline at end of file From 3130fc93d6ac056ceb71a82efd6ede322391bd22 Mon Sep 17 00:00:00 2001 From: Atharva Rane <41084525+atharvar28@users.noreply.github.com> Date: Sat, 4 Feb 2023 10:58:50 -0500 Subject: [PATCH 065/362] Updating the reset pool for jenkins-envs (#2143) --- files/scripts/ci-env-pool-reset.sh | 6 ------ 1 file changed, 6 deletions(-) diff 
--git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index 3f1d951d2..a142fd7c2 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -27,19 +27,13 @@ fi source "${GEN3_HOME}/gen3/gen3setup.sh" cat - > jenkins-envs-services.txt < jenkins-envs-releases.txt < Date: Mon, 6 Feb 2023 11:55:50 -0600 Subject: [PATCH 066/362] fix(fluentd-karpenter): Updated fluentd config to work with containerd(default in karpenter) (#2142) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-fluentd.sh | 1 + kube/services/fluentd/fluentd-karpenter.yaml | 94 ++++++++++++++++++++ kube/services/fluentd/fluentd.yaml | 10 +++ 3 files changed, 105 insertions(+) create mode 100644 kube/services/fluentd/fluentd-karpenter.yaml diff --git a/gen3/bin/kube-setup-fluentd.sh b/gen3/bin/kube-setup-fluentd.sh index 81fb0d2f6..c1d15bb80 100644 --- a/gen3/bin/kube-setup-fluentd.sh +++ b/gen3/bin/kube-setup-fluentd.sh @@ -46,6 +46,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then g3kubectl "--namespace=logging" delete daemonset fluentd fi (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f - + (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-karpenter.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f - # We need this serviceaccount to be in the default namespace for the job and cronjob to properly work g3kubectl apply -f "${GEN3_HOME}/kube/services/fluentd/fluent-jobs-serviceaccount.yaml" -n default if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ]; diff --git a/kube/services/fluentd/fluentd-karpenter.yaml b/kube/services/fluentd/fluentd-karpenter.yaml new file mode 100644 index 000000000..8949a734f --- /dev/null +++ b/kube/services/fluentd/fluentd-karpenter.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: 
DaemonSet +metadata: + name: fluentd-karpenter + namespace: logging + labels: + k8s-app: fluentd-karpenter-logging + version: v1 + GEN3_DATE_LABEL + kubernetes.io/cluster-service: "true" +spec: + selector: + matchLabels: + k8s-app: fluentd-karpenter-logging + version: v1 + template: + metadata: + labels: + k8s-app: fluentd-karpenter-logging + version: v1 + kubernetes.io/cluster-service: "true" + spec: + priorityClassName: system-cluster-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: karpenter.sh/initialized + operator: In + values: + - "true" + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: "role" + operator: "Equal" + value: "jupyter" + effect: "NoSchedule" + - key: "role" + operator: "Equal" + value: "workflow" + effect: "NoSchedule" + containers: + - name: fluentd + GEN3_FLUENTD_IMAGE + env: + # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Deploy with kube-setup-fluentd.sh ... 
+ - name: LOG_GROUP_NAME + GEN3_LOG_GROUP_NAME + - name: AWS_REGION + value: "us-east-1" + - name: FLUENTD_CONF + value: "gen3.conf" + - name: FLUENT_CONTAINER_TAIL_PARSER_TYPE + value: "cri" + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 1Gi + volumeMounts: + - name: fluentd-gen3 + mountPath: /fluentd/etc/gen3.conf + subPath: gen3.conf + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + command: ["/bin/bash" ] + args: + - "-c" + # Script always succeeds if it runs (echo exits with 0) + - | + /fluentd/entrypoint.sh + terminationGracePeriodSeconds: 30 + serviceAccountName: fluentd + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: fluentd-gen3 + configMap: + name: fluentd-gen3 diff --git a/kube/services/fluentd/fluentd.yaml b/kube/services/fluentd/fluentd.yaml index 5c25ddfaa..dc2bbf05b 100644 --- a/kube/services/fluentd/fluentd.yaml +++ b/kube/services/fluentd/fluentd.yaml @@ -20,6 +20,16 @@ spec: version: v1 kubernetes.io/cluster-service: "true" spec: + priorityClassName: system-cluster-critical + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: karpenter.sh/initialized + operator: NotIn + values: + - "true" tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule From f079f7bee641b034f42124b557275995a24573e6 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 6 Feb 2023 14:23:09 -0700 Subject: [PATCH 067/362] default namespace logic (#2144) * adding logic to only setup autoscaler/karpenter if it is run in the default namespace * having the deploy function for kube-setup-clusterautoscaler and kube-setup-karpenter only run if called from the default namespace instead of modifying the logic in roll-all. 
* fixing logic * splitting up conditional statements --- gen3/bin/kube-setup-autoscaler.sh | 25 ++++++++++++++----------- gen3/bin/kube-setup-karpenter.sh | 6 ++++-- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/gen3/bin/kube-setup-autoscaler.sh b/gen3/bin/kube-setup-autoscaler.sh index b3659a2be..00d5dc4e7 100644 --- a/gen3/bin/kube-setup-autoscaler.sh +++ b/gen3/bin/kube-setup-autoscaler.sh @@ -11,6 +11,9 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +ctx="$(g3kubectl config current-context)" +ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" + if [[ -n "$JENKINS_HOME" ]]; then echo "Jenkins skipping fluentd setup: $JENKINS_HOME" exit 0 @@ -69,20 +72,20 @@ function get_autoscaler_version(){ function deploy() { - - if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1) || [[ "$FORCE" == true ]]; then - if ! [ -z ${CAS_VERSION} ]; - then - casv=${CAS_VERSION} + if [["$ctxNamespace" == "default" || "$ctxNamespace" == "null"]]; then + if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1) || [[ "$FORCE" == true]]; then + if ! 
[ -z ${CAS_VERSION} ]; + then + casv=${CAS_VERSION} + else + casv="$(get_autoscaler_version)" # cas stands for ClusterAutoScaler + fi + echo "Deploying cluster autoscaler ${casv} in ${vpc_name}" + g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" apply -f - else - casv="$(get_autoscaler_version)" # cas stands for ClusterAutoScaler + echo "kube-setup-autoscaler exiting - cluster-autoscaler already deployed, use --force to redeploy" fi - echo "Deploying cluster autoscaler ${casv} in ${vpc_name}" - g3k_kv_filter "${GEN3_HOME}/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml" VPC_NAME "${vpc_name}" CAS_VERSION ${casv} | g3kubectl "--namespace=kube-system" apply -f - - else - echo "kube-setup-autoscaler exiting - cluster-autoscaler already deployed, use --force to redeploy" fi - } function remove() { diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index a577d5671..50d3a4590 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -5,15 +5,17 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" +ctx="$(g3kubectl config current-context)" +ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" gen3_deploy_karpenter() { # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy - if [[ -z $(g3kubectl get namespaces | grep karpenter) ]] || [[ $FORCE ]]; then + if [[( -z $(g3kubectl get namespaces | grep karpenter) || $FORCE ) && ("$ctxNamespace" == "default" || "$ctxNamespace" == "null")]]; then # Ensure the spot instance service linked role is setup # It is required for running spot instances aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true karpenter=${karpenter:-v0.22.0} - echo '{ + echo '{b "Statement": [ { "Action": [ From 
075fcb1b4aa4376b6bac053b99adec70c85fb9a4 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 7 Feb 2023 15:05:52 -0700 Subject: [PATCH 068/362] Creating a new branch to have argocd manage datadog. (#2116) * Creating a new branch to have argocd manage datadog. First, I want to test in "datadog-test" namespace so I am currently modifying the kube-setup-datadog script until I can verify it is working * fixing mispelling * adding namespace flag to deploy the application.yaml file in the argocd namespace * changing "datadog-test" to "datadog" to ensure it is working properly in dev environment. * labeling and annotating the argocd namespace * corrected a typo and deleted the manual helm install to replace it will a kubectl apply so argocd can manage prometheus * removing the "dex-server" from the argocd install yaml as it is not needed. * adding a application.yaml for prometheus so argocd can manage it moving forward. * fixing the namespace label * adding a target revision and providing the proper repo link for the prometheus helm chart * removing the trailing / * changing the repo URL due to argo errors * adding the path variable to the argocd prometheus appliction.yaml * changing the repo url, targetrevision, and path so argocd can locate the helm chart * adding the trailing / * removing the "path" field * fixing the taget revision * testing adding just prometheus chart * associating the thanos sa with thanos-store and thanos-compactor * correcting the appliction yaml file for prometheus install * changing the target revision for prometheus appliction yaml * updated the appliction yaml file name for datadog * adding a flag that can be set via the global manifest which will allow users to deploy prometheus/datadog via argocd. 
* changed the order of when to deploy thanos * testing the prometheus endpoint for devplanet * forgot to remove "done" from the loop * realized this is the old way of accessing prometheus * changing the target revision to match prod * changing the revproxy setup script to look in the new "monitoring" namespace for prometheus. Edited the revproxy prometheus nginx conf file to work with the new prometheus as well. * changing the baseURL for testing * using the test values.yaml file for the prometheus Helm chart * changing the values.yaml file to have the original /prometheus endpoint * added the rewrite rules to get the prometheus endpoint working * adding extra args to prometheus stack values.yaml * testing the changes I made to values.yaml for prometheus * changing the web.route-prefix * needed to change the placement of the additional arguments * trying a helm install for argocd to fix dex server errors and ephemeral memory issues * fixed argocd typo * changing the argocd deployment to deploy a helm chart instead of regular Kubernetes Manifest * adding new values.yaml for prometheus stack * adding web prefix to help external-url var be picked up * changing the location I set the externalURL * testing a new externalURL to resolve nginx issues * updating the original values file to test if it works with Prometheus Stack * updating the argocd application.yaml file for prometheus. Changing the values.yaml file that is used. 
* no longer needed * reverting the changes made to the old values.yaml file * upping the limits and requests for the argocd repo server * reduced the memory a bit for argocd repo server * adding a netpolicy for prometheus * deleting the unneeded network policies for Prometheus * adding a value for the grafana endpoint * setting the values file to point to master in preparation for mergine the PR --- gen3/bin/kube-setup-argocd.sh | 4 +- gen3/bin/kube-setup-datadog.sh | 8 + gen3/bin/kube-setup-prometheus.sh | 12 +- gen3/bin/kube-setup-revproxy.sh | 7 +- kube/services/argocd/install.yaml | 11116 ---------------- kube/services/argocd/values.yaml | 2894 ++++ .../services/datadog/datadog-application.yaml | 24 + .../monitoring/prometheus-application.yaml | 24 + .../monitoring/prometheus-values.yaml | 2 +- kube/services/monitoring/thanos-deploy.yaml | 4 +- kube/services/monitoring/values.yaml | 4 +- .../gen3.nginx.conf/prometheus-server.conf | 6 +- 12 files changed, 2975 insertions(+), 11130 deletions(-) delete mode 100644 kube/services/argocd/install.yaml create mode 100644 kube/services/argocd/values.yaml create mode 100644 kube/services/datadog/datadog-application.yaml create mode 100644 kube/services/monitoring/prometheus-application.yaml diff --git a/gen3/bin/kube-setup-argocd.sh b/gen3/bin/kube-setup-argocd.sh index 635b7c584..4a9ac0f74 100644 --- a/gen3/bin/kube-setup-argocd.sh +++ b/gen3/bin/kube-setup-argocd.sh @@ -12,7 +12,9 @@ then else kubectl create namespace argocd kubectl label namespace argocd app="argocd" - kubectl apply -f "${GEN3_HOME}/kube/services/argocd/install.yaml" -n argocd + kubectl annotate namespace argocd app="argocd" + helm repo add argo https://argoproj.github.io/argo-helm + helm upgrade --install argocd -f "$GEN3_HOME/kube/services/argocd/values.yaml" argo/argo-cd -n argocd gen3 kube-setup-revproxy export argocdsecret=`kubectl get secret argocd-initial-admin-secret -n argocd -o json | jq .data.password -r | base64 -d` # pragma: allowlist 
secret gen3_log_info "You can now access the ArgoCD endpoint with the following credentials: Username= admin and Password= $argocdsecret" diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh index baf0dbb2a..172d8c8da 100644 --- a/gen3/bin/kube-setup-datadog.sh +++ b/gen3/bin/kube-setup-datadog.sh @@ -5,6 +5,10 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" gen3_load "gen3/lib/kube-setup-init" +# Deploy Datadog with argocd if flag is set in the manifest path +manifestPath=$(g3k_manifest_path) +argocd="$(jq -r ".[\"global\"][\"argocd\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" + if [[ -n "$JENKINS_HOME" ]]; then gen3_log_info "Jenkins skipping datadog setup: $JENKINS_HOME" exit 0 @@ -44,7 +48,11 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) + if [ "$argocd" = true ]; then + g3kubectl apply -f "$GEN3_HOME/kube/services/datadog/datadog-application.yaml" --namespace=argocd + else helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2) + fi ) else gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" diff --git a/gen3/bin/kube-setup-prometheus.sh b/gen3/bin/kube-setup-prometheus.sh index 848c33389..965cb5eb6 100644 --- a/gen3/bin/kube-setup-prometheus.sh +++ b/gen3/bin/kube-setup-prometheus.sh @@ -8,6 +8,10 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +# Deploy Prometheus with argocd if flag is set in the manifest path +manifestPath=$(g3k_manifest_path) +argocd="$(jq -r ".[\"global\"][\"argocd\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" + if [[ -n "$JENKINS_HOME" ]]; then gen3_log_info "Jenkins skipping prometheus/grafana setup: $JENKINS_HOME" 
exit 0 @@ -60,7 +64,7 @@ function deploy_prometheus() if (! g3kubectl get namespace monitoring> /dev/null 2>&1); then g3kubectl create namespace monitoring - g3kubectl label namespace namespace app=prometheus + g3kubectl label namespace monitoring app=prometheus fi if (g3kubectl --namespace=monitoring get deployment prometheus-server > /dev/null 2>&1); @@ -71,8 +75,12 @@ function deploy_prometheus() if ! g3kubectl get storageclass prometheus > /dev/null 2>&1; then g3kubectl apply -f "${GEN3_HOME}/kube/services/monitoring/prometheus-storageclass.yaml" fi - deploy_thanos + if [ "$argocd" = true ]; then + g3kubectl apply -f "$GEN3_HOME/kube/services/monitoring/prometheus-application.yaml" --namespace=argocd + else gen3 arun helm upgrade --install prometheus prometheus-community/kube-prometheus-stack --namespace monitoring -f "${GEN3_HOME}/kube/services/monitoring/values.yaml" + fi + deploy_thanos else gen3_log_info "Prometheus is already installed, use --force to try redeploying" fi diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 02fcc5c38..307acaecf 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -132,15 +132,12 @@ fi if [[ $current_namespace == "default" ]]; then - if g3kubectl get namespace prometheus > /dev/null 2>&1; + if g3kubectl get namespace monitoring > /dev/null 2>&1; then - for prometheus in $(g3kubectl get services -n prometheus -o jsonpath='{.items[*].metadata.name}'); - do - filePath="$scriptDir/gen3.nginx.conf/${prometheus}.conf" + filePath="$scriptDir/gen3.nginx.conf/prometheus-server.conf" if [[ -f "$filePath" ]]; then confFileList+=("--from-file" "$filePath") fi - done fi fi diff --git a/kube/services/argocd/install.yaml b/kube/services/argocd/install.yaml deleted file mode 100644 index 10f6477c7..000000000 --- a/kube/services/argocd/install.yaml +++ /dev/null @@ -1,11116 +0,0 @@ -# This is an auto-generated file. 
DO NOT EDIT -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/name: applications.argoproj.io - app.kubernetes.io/part-of: argocd - name: applications.argoproj.io -spec: - group: argoproj.io - names: - kind: Application - listKind: ApplicationList - plural: applications - shortNames: - - app - - apps - singular: application - scope: Namespaced - versions: - - additionalPrinterColumns: - - jsonPath: .status.sync.status - name: Sync Status - type: string - - jsonPath: .status.health.status - name: Health Status - type: string - - jsonPath: .status.sync.revision - name: Revision - priority: 10 - type: string - name: v1alpha1 - schema: - openAPIV3Schema: - description: Application is a definition of Application resource. - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - operation: - description: Operation contains information about a requested or running - operation - properties: - info: - description: Info is a list of informational items for this operation - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - initiatedBy: - description: InitiatedBy contains information about who initiated - the operations - properties: - automated: - description: Automated is set to true if operation was initiated - automatically by the application controller. - type: boolean - username: - description: Username contains the name of a user who started - operation - type: string - type: object - retry: - description: Retry controls the strategy to apply if a sync fails - properties: - backoff: - description: Backoff controls how to backoff on subsequent retries - of failed syncs - properties: - duration: - description: Duration is the amount to back off. Default unit - is seconds, but could also be a duration (e.g. "2m", "1h") - type: string - factor: - description: Factor is a factor to multiply the base duration - after each failed retry - format: int64 - type: integer - maxDuration: - description: MaxDuration is the maximum amount of time allowed - for the backoff strategy - type: string - type: object - limit: - description: Limit is the maximum number of attempts for retrying - a failed sync. If set to 0, no retries will be performed. 
- format: int64 - type: integer - type: object - sync: - description: Sync contains parameters for the operation - properties: - dryRun: - description: DryRun specifies to perform a `kubectl apply --dry-run` - without actually performing the sync - type: boolean - manifests: - description: Manifests is an optional field that overrides sync - source with a local directory for development - items: - type: string - type: array - prune: - description: Prune specifies to delete resources from the cluster - that are no longer tracked in git - type: boolean - resources: - description: Resources describes which resources shall be part - of the sync - items: - description: SyncOperationResource contains resources to sync. - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - type: array - revision: - description: Revision is the revision (Git) or chart version (Helm) - which to sync the application to If omitted, will use the revision - specified in app spec. - type: string - source: - description: Source overrides the source definition set in the - application. This is typically set in a Rollback operation and - is nil during a Sync operation - properties: - chart: - description: Chart is a Helm chart name, and must be specified - for applications sourced from a Helm repo. 
- type: string - directory: - description: Directory holds path/directory specific options - properties: - exclude: - description: Exclude contains a glob pattern to match - paths against that should be explicitly excluded from - being used during manifest generation - type: string - include: - description: Include contains a glob pattern to match - paths against that should be explicitly included during - manifest generation - type: string - jsonnet: - description: Jsonnet holds options specific to Jsonnet - properties: - extVars: - description: ExtVars is a list of Jsonnet External - Variables - items: - description: JsonnetVar represents a variable to - be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - description: Additional library search dirs - items: - type: string - type: array - tlas: - description: TLAS is a list of Jsonnet Top-level Arguments - items: - description: JsonnetVar represents a variable to - be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - description: Recurse specifies whether to scan a directory - recursively for manifests - type: boolean - type: object - helm: - description: Helm holds helm specific options - properties: - fileParameters: - description: FileParameters are file parameters to the - helm template - items: - description: HelmFileParameter is a file parameter that's - passed to helm template during manifest generation - properties: - name: - description: Name is the name of the Helm parameter - type: string - path: - description: Path is the path to the file containing - the values for the Helm parameter - type: string - type: object - type: array - ignoreMissingValueFiles: - description: 
IgnoreMissingValueFiles prevents helm template - from failing when valueFiles do not exist locally by - not appending them to helm template --values - type: boolean - parameters: - description: Parameters is a list of Helm parameters which - are passed to the helm template command upon manifest - generation - items: - description: HelmParameter is a parameter that's passed - to helm template during manifest generation - properties: - forceString: - description: ForceString determines whether to tell - Helm to interpret booleans and numbers as strings - type: boolean - name: - description: Name is the name of the Helm parameter - type: string - value: - description: Value is the value for the Helm parameter - type: string - type: object - type: array - passCredentials: - description: PassCredentials pass credentials to all domains - (Helm's --pass-credentials) - type: boolean - releaseName: - description: ReleaseName is the Helm release name to use. - If omitted it will use the application name - type: string - skipCrds: - description: SkipCrds skips custom resource definition - installation step (Helm's --skip-crds) - type: boolean - valueFiles: - description: ValuesFiles is a list of Helm value files - to use when generating a template - items: - type: string - type: array - values: - description: Values specifies Helm values to be passed - to helm template, typically defined as a block - type: string - version: - description: Version is the Helm version to use for templating - ("3") - type: string - type: object - kustomize: - description: Kustomize holds kustomize specific options - properties: - commonAnnotations: - additionalProperties: - type: string - description: CommonAnnotations is a list of additional - annotations to add to rendered manifests - type: object - commonLabels: - additionalProperties: - type: string - description: CommonLabels is a list of additional labels - to add to rendered manifests - type: object - forceCommonAnnotations: - 
description: ForceCommonAnnotations specifies whether - to force applying common annotations to resources for - Kustomize apps - type: boolean - forceCommonLabels: - description: ForceCommonLabels specifies whether to force - applying common labels to resources for Kustomize apps - type: boolean - images: - description: Images is a list of Kustomize image override - specifications - items: - description: KustomizeImage represents a Kustomize image - definition in the format [old_image_name=]: - type: string - type: array - namePrefix: - description: NamePrefix is a prefix appended to resources - for Kustomize apps - type: string - nameSuffix: - description: NameSuffix is a suffix appended to resources - for Kustomize apps - type: string - version: - description: Version controls which version of Kustomize - to use for rendering manifests - type: string - type: object - path: - description: Path is a directory path within the Git repository, - and is only valid for applications sourced from Git. - type: string - plugin: - description: Plugin holds config management plugin specific - options - properties: - env: - description: Env is a list of environment variable entries - items: - description: EnvEntry represents an entry in the application's - environment - properties: - name: - description: Name is the name of the variable, usually - expressed in uppercase - type: string - value: - description: Value is the value of the variable - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - description: RepoURL is the URL to the repository (Git or - Helm) that contains the application manifests - type: string - targetRevision: - description: TargetRevision defines the revision of the source - to sync the application to. In case of Git, this can be - commit, tag, or branch. If omitted, will equal to HEAD. - In case of Helm, this is a semver tag for the Chart's version. 
- type: string - required: - - repoURL - type: object - syncOptions: - description: SyncOptions provide per-sync sync-options, e.g. Validate=false - items: - type: string - type: array - syncStrategy: - description: SyncStrategy describes how to perform the sync - properties: - apply: - description: Apply will perform a `kubectl apply` to perform - the sync. - properties: - force: - description: Force indicates whether or not to supply - the --force flag to `kubectl apply`. The --force flag - deletes and re-create the resource, when PATCH encounters - conflict and has retried for 5 times. - type: boolean - type: object - hook: - description: Hook will submit any referenced resources to - perform the sync. This is the default strategy - properties: - force: - description: Force indicates whether or not to supply - the --force flag to `kubectl apply`. The --force flag - deletes and re-create the resource, when PATCH encounters - conflict and has retried for 5 times. - type: boolean - type: object - type: object - type: object - type: object - spec: - description: ApplicationSpec represents desired application state. Contains - link to repository with application definition and additional parameters - link definition revision. - properties: - destination: - description: Destination is a reference to the target Kubernetes server - and namespace - properties: - name: - description: Name is an alternate way of specifying the target - cluster by its symbolic name - type: string - namespace: - description: Namespace specifies the target namespace for the - application's resources. 
The namespace will only be set for - namespace-scoped resources that have not set a value for .metadata.namespace - type: string - server: - description: Server specifies the URL of the target cluster and - must be set to the Kubernetes control plane API - type: string - type: object - ignoreDifferences: - description: IgnoreDifferences is a list of resources and their fields - which should be ignored during comparison - items: - description: ResourceIgnoreDifferences contains resource filter - and list of json paths which should be ignored during comparison - with live state. - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - description: ManagedFieldsManagers is a list of trusted managers. - Fields mutated by those managers will take precedence over - the desired state defined in the SCM and won't be displayed - in diffs - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - description: Info contains a list of information (URLs, email addresses, - and plain text) that relates to the application - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - description: Project is a reference to the project this application - belongs to. The empty string means that application belongs to the - 'default' project. - type: string - revisionHistoryLimit: - description: RevisionHistoryLimit limits the number of items kept - in the application's revision history, which is used for informational - purposes as well as for rollbacks to previous versions. This should - only be changed in exceptional circumstances. Setting to zero will - store no history. This will reduce storage used. 
Increasing will - increase the space used to store the history, so we do not recommend - increasing it. Default is 10. - format: int64 - type: integer - source: - description: Source is a reference to the location of the application's - manifests or chart - properties: - chart: - description: Chart is a Helm chart name, and must be specified - for applications sourced from a Helm repo. - type: string - directory: - description: Directory holds path/directory specific options - properties: - exclude: - description: Exclude contains a glob pattern to match paths - against that should be explicitly excluded from being used - during manifest generation - type: string - include: - description: Include contains a glob pattern to match paths - against that should be explicitly included during manifest - generation - type: string - jsonnet: - description: Jsonnet holds options specific to Jsonnet - properties: - extVars: - description: ExtVars is a list of Jsonnet External Variables - items: - description: JsonnetVar represents a variable to be - passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - description: Additional library search dirs - items: - type: string - type: array - tlas: - description: TLAS is a list of Jsonnet Top-level Arguments - items: - description: JsonnetVar represents a variable to be - passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - description: Recurse specifies whether to scan a directory - recursively for manifests - type: boolean - type: object - helm: - description: Helm holds helm specific options - properties: - fileParameters: - description: FileParameters are file parameters to the helm - template - items: - description: 
HelmFileParameter is a file parameter that's - passed to helm template during manifest generation - properties: - name: - description: Name is the name of the Helm parameter - type: string - path: - description: Path is the path to the file containing - the values for the Helm parameter - type: string - type: object - type: array - ignoreMissingValueFiles: - description: IgnoreMissingValueFiles prevents helm template - from failing when valueFiles do not exist locally by not - appending them to helm template --values - type: boolean - parameters: - description: Parameters is a list of Helm parameters which - are passed to the helm template command upon manifest generation - items: - description: HelmParameter is a parameter that's passed - to helm template during manifest generation - properties: - forceString: - description: ForceString determines whether to tell - Helm to interpret booleans and numbers as strings - type: boolean - name: - description: Name is the name of the Helm parameter - type: string - value: - description: Value is the value for the Helm parameter - type: string - type: object - type: array - passCredentials: - description: PassCredentials pass credentials to all domains - (Helm's --pass-credentials) - type: boolean - releaseName: - description: ReleaseName is the Helm release name to use. 
- If omitted it will use the application name - type: string - skipCrds: - description: SkipCrds skips custom resource definition installation - step (Helm's --skip-crds) - type: boolean - valueFiles: - description: ValuesFiles is a list of Helm value files to - use when generating a template - items: - type: string - type: array - values: - description: Values specifies Helm values to be passed to - helm template, typically defined as a block - type: string - version: - description: Version is the Helm version to use for templating - ("3") - type: string - type: object - kustomize: - description: Kustomize holds kustomize specific options - properties: - commonAnnotations: - additionalProperties: - type: string - description: CommonAnnotations is a list of additional annotations - to add to rendered manifests - type: object - commonLabels: - additionalProperties: - type: string - description: CommonLabels is a list of additional labels to - add to rendered manifests - type: object - forceCommonAnnotations: - description: ForceCommonAnnotations specifies whether to force - applying common annotations to resources for Kustomize apps - type: boolean - forceCommonLabels: - description: ForceCommonLabels specifies whether to force - applying common labels to resources for Kustomize apps - type: boolean - images: - description: Images is a list of Kustomize image override - specifications - items: - description: KustomizeImage represents a Kustomize image - definition in the format [old_image_name=]: - type: string - type: array - namePrefix: - description: NamePrefix is a prefix appended to resources - for Kustomize apps - type: string - nameSuffix: - description: NameSuffix is a suffix appended to resources - for Kustomize apps - type: string - version: - description: Version controls which version of Kustomize to - use for rendering manifests - type: string - type: object - path: - description: Path is a directory path within the Git repository, - and is only valid 
for applications sourced from Git. - type: string - plugin: - description: Plugin holds config management plugin specific options - properties: - env: - description: Env is a list of environment variable entries - items: - description: EnvEntry represents an entry in the application's - environment - properties: - name: - description: Name is the name of the variable, usually - expressed in uppercase - type: string - value: - description: Value is the value of the variable - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - description: RepoURL is the URL to the repository (Git or Helm) - that contains the application manifests - type: string - targetRevision: - description: TargetRevision defines the revision of the source - to sync the application to. In case of Git, this can be commit, - tag, or branch. If omitted, will equal to HEAD. In case of Helm, - this is a semver tag for the Chart's version. - type: string - required: - - repoURL - type: object - syncPolicy: - description: SyncPolicy controls when and how a sync will be performed - properties: - automated: - description: Automated will keep an application synced to the - target revision - properties: - allowEmpty: - description: 'AllowEmpty allows apps have zero live resources - (default: false)' - type: boolean - prune: - description: 'Prune specifies whether to delete resources - from the cluster that are not found in the sources anymore - as part of automated sync (default: false)' - type: boolean - selfHeal: - description: 'SelfHeal specifes whether to revert resources - back to their desired state upon modification in the cluster - (default: false)' - type: boolean - type: object - retry: - description: Retry controls failed sync retry behavior - properties: - backoff: - description: Backoff controls how to backoff on subsequent - retries of failed syncs - properties: - duration: - description: Duration is the amount to back 
off. Default - unit is seconds, but could also be a duration (e.g. - "2m", "1h") - type: string - factor: - description: Factor is a factor to multiply the base duration - after each failed retry - format: int64 - type: integer - maxDuration: - description: MaxDuration is the maximum amount of time - allowed for the backoff strategy - type: string - type: object - limit: - description: Limit is the maximum number of attempts for retrying - a failed sync. If set to 0, no retries will be performed. - format: int64 - type: integer - type: object - syncOptions: - description: Options allow you to specify whole app sync-options - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - status: - description: ApplicationStatus contains status information for the application - properties: - conditions: - description: Conditions is a list of currently observed application - conditions - items: - description: ApplicationCondition contains details about an application - condition, which is usally an error or warning - properties: - lastTransitionTime: - description: LastTransitionTime is the time the condition was - last observed - format: date-time - type: string - message: - description: Message contains human-readable message indicating - details about condition - type: string - type: - description: Type is an application condition type - type: string - required: - - message - - type - type: object - type: array - health: - description: Health contains information about the application's current - health status - properties: - message: - description: Message is a human-readable informational message - describing the health status - type: string - status: - description: Status holds the status code of the application or - resource - type: string - type: object - history: - description: History contains information about the application's - sync history - items: - description: RevisionHistory contains history 
information about - a previous sync - properties: - deployStartedAt: - description: DeployStartedAt holds the time the sync operation - started - format: date-time - type: string - deployedAt: - description: DeployedAt holds the time the sync operation completed - format: date-time - type: string - id: - description: ID is an auto incrementing identifier of the RevisionHistory - format: int64 - type: integer - revision: - description: Revision holds the revision the sync was performed - against - type: string - source: - description: Source is a reference to the application source - used for the sync operation - properties: - chart: - description: Chart is a Helm chart name, and must be specified - for applications sourced from a Helm repo. - type: string - directory: - description: Directory holds path/directory specific options - properties: - exclude: - description: Exclude contains a glob pattern to match - paths against that should be explicitly excluded from - being used during manifest generation - type: string - include: - description: Include contains a glob pattern to match - paths against that should be explicitly included during - manifest generation - type: string - jsonnet: - description: Jsonnet holds options specific to Jsonnet - properties: - extVars: - description: ExtVars is a list of Jsonnet External - Variables - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - description: Additional library search dirs - items: - type: string - type: array - tlas: - description: TLAS is a list of Jsonnet Top-level - Arguments - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - 
type: object - type: array - type: object - recurse: - description: Recurse specifies whether to scan a directory - recursively for manifests - type: boolean - type: object - helm: - description: Helm holds helm specific options - properties: - fileParameters: - description: FileParameters are file parameters to the - helm template - items: - description: HelmFileParameter is a file parameter - that's passed to helm template during manifest generation - properties: - name: - description: Name is the name of the Helm parameter - type: string - path: - description: Path is the path to the file containing - the values for the Helm parameter - type: string - type: object - type: array - ignoreMissingValueFiles: - description: IgnoreMissingValueFiles prevents helm template - from failing when valueFiles do not exist locally - by not appending them to helm template --values - type: boolean - parameters: - description: Parameters is a list of Helm parameters - which are passed to the helm template command upon - manifest generation - items: - description: HelmParameter is a parameter that's passed - to helm template during manifest generation - properties: - forceString: - description: ForceString determines whether to - tell Helm to interpret booleans and numbers - as strings - type: boolean - name: - description: Name is the name of the Helm parameter - type: string - value: - description: Value is the value for the Helm parameter - type: string - type: object - type: array - passCredentials: - description: PassCredentials pass credentials to all - domains (Helm's --pass-credentials) - type: boolean - releaseName: - description: ReleaseName is the Helm release name to - use. 
If omitted it will use the application name - type: string - skipCrds: - description: SkipCrds skips custom resource definition - installation step (Helm's --skip-crds) - type: boolean - valueFiles: - description: ValuesFiles is a list of Helm value files - to use when generating a template - items: - type: string - type: array - values: - description: Values specifies Helm values to be passed - to helm template, typically defined as a block - type: string - version: - description: Version is the Helm version to use for - templating ("3") - type: string - type: object - kustomize: - description: Kustomize holds kustomize specific options - properties: - commonAnnotations: - additionalProperties: - type: string - description: CommonAnnotations is a list of additional - annotations to add to rendered manifests - type: object - commonLabels: - additionalProperties: - type: string - description: CommonLabels is a list of additional labels - to add to rendered manifests - type: object - forceCommonAnnotations: - description: ForceCommonAnnotations specifies whether - to force applying common annotations to resources - for Kustomize apps - type: boolean - forceCommonLabels: - description: ForceCommonLabels specifies whether to - force applying common labels to resources for Kustomize - apps - type: boolean - images: - description: Images is a list of Kustomize image override - specifications - items: - description: KustomizeImage represents a Kustomize - image definition in the format [old_image_name=]: - type: string - type: array - namePrefix: - description: NamePrefix is a prefix appended to resources - for Kustomize apps - type: string - nameSuffix: - description: NameSuffix is a suffix appended to resources - for Kustomize apps - type: string - version: - description: Version controls which version of Kustomize - to use for rendering manifests - type: string - type: object - path: - description: Path is a directory path within the Git repository, - and is only valid 
for applications sourced from Git. - type: string - plugin: - description: Plugin holds config management plugin specific - options - properties: - env: - description: Env is a list of environment variable entries - items: - description: EnvEntry represents an entry in the application's - environment - properties: - name: - description: Name is the name of the variable, - usually expressed in uppercase - type: string - value: - description: Value is the value of the variable - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - description: RepoURL is the URL to the repository (Git or - Helm) that contains the application manifests - type: string - targetRevision: - description: TargetRevision defines the revision of the - source to sync the application to. In case of Git, this - can be commit, tag, or branch. If omitted, will equal - to HEAD. In case of Helm, this is a semver tag for the - Chart's version. - type: string - required: - - repoURL - type: object - required: - - deployedAt - - id - - revision - type: object - type: array - observedAt: - description: 'ObservedAt indicates when the application state was - updated without querying latest git state Deprecated: controller - no longer updates ObservedAt field' - format: date-time - type: string - operationState: - description: OperationState contains information about any ongoing - operations, such as a sync - properties: - finishedAt: - description: FinishedAt contains time of operation completion - format: date-time - type: string - message: - description: Message holds any pertinent messages when attempting - to perform operation (typically errors). 
- type: string - operation: - description: Operation is the original requested operation - properties: - info: - description: Info is a list of informational items for this - operation - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - initiatedBy: - description: InitiatedBy contains information about who initiated - the operations - properties: - automated: - description: Automated is set to true if operation was - initiated automatically by the application controller. - type: boolean - username: - description: Username contains the name of a user who - started operation - type: string - type: object - retry: - description: Retry controls the strategy to apply if a sync - fails - properties: - backoff: - description: Backoff controls how to backoff on subsequent - retries of failed syncs - properties: - duration: - description: Duration is the amount to back off. Default - unit is seconds, but could also be a duration (e.g. - "2m", "1h") - type: string - factor: - description: Factor is a factor to multiply the base - duration after each failed retry - format: int64 - type: integer - maxDuration: - description: MaxDuration is the maximum amount of - time allowed for the backoff strategy - type: string - type: object - limit: - description: Limit is the maximum number of attempts for - retrying a failed sync. If set to 0, no retries will - be performed. 
- format: int64 - type: integer - type: object - sync: - description: Sync contains parameters for the operation - properties: - dryRun: - description: DryRun specifies to perform a `kubectl apply - --dry-run` without actually performing the sync - type: boolean - manifests: - description: Manifests is an optional field that overrides - sync source with a local directory for development - items: - type: string - type: array - prune: - description: Prune specifies to delete resources from - the cluster that are no longer tracked in git - type: boolean - resources: - description: Resources describes which resources shall - be part of the sync - items: - description: SyncOperationResource contains resources - to sync. - properties: - group: - type: string - kind: - type: string - name: - type: string - namespace: - type: string - required: - - kind - - name - type: object - type: array - revision: - description: Revision is the revision (Git) or chart version - (Helm) which to sync the application to If omitted, - will use the revision specified in app spec. - type: string - source: - description: Source overrides the source definition set - in the application. This is typically set in a Rollback - operation and is nil during a Sync operation - properties: - chart: - description: Chart is a Helm chart name, and must - be specified for applications sourced from a Helm - repo. 
- type: string - directory: - description: Directory holds path/directory specific - options - properties: - exclude: - description: Exclude contains a glob pattern to - match paths against that should be explicitly - excluded from being used during manifest generation - type: string - include: - description: Include contains a glob pattern to - match paths against that should be explicitly - included during manifest generation - type: string - jsonnet: - description: Jsonnet holds options specific to - Jsonnet - properties: - extVars: - description: ExtVars is a list of Jsonnet - External Variables - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest - generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - description: Additional library search dirs - items: - type: string - type: array - tlas: - description: TLAS is a list of Jsonnet Top-level - Arguments - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest - generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - description: Recurse specifies whether to scan - a directory recursively for manifests - type: boolean - type: object - helm: - description: Helm holds helm specific options - properties: - fileParameters: - description: FileParameters are file parameters - to the helm template - items: - description: HelmFileParameter is a file parameter - that's passed to helm template during manifest - generation - properties: - name: - description: Name is the name of the Helm - parameter - type: string - path: - description: Path is the path to the file - containing the values for the Helm parameter - type: string - type: object - type: array - ignoreMissingValueFiles: - description: 
IgnoreMissingValueFiles prevents - helm template from failing when valueFiles do - not exist locally by not appending them to helm - template --values - type: boolean - parameters: - description: Parameters is a list of Helm parameters - which are passed to the helm template command - upon manifest generation - items: - description: HelmParameter is a parameter that's - passed to helm template during manifest generation - properties: - forceString: - description: ForceString determines whether - to tell Helm to interpret booleans and - numbers as strings - type: boolean - name: - description: Name is the name of the Helm - parameter - type: string - value: - description: Value is the value for the - Helm parameter - type: string - type: object - type: array - passCredentials: - description: PassCredentials pass credentials - to all domains (Helm's --pass-credentials) - type: boolean - releaseName: - description: ReleaseName is the Helm release name - to use. If omitted it will use the application - name - type: string - skipCrds: - description: SkipCrds skips custom resource definition - installation step (Helm's --skip-crds) - type: boolean - valueFiles: - description: ValuesFiles is a list of Helm value - files to use when generating a template - items: - type: string - type: array - values: - description: Values specifies Helm values to be - passed to helm template, typically defined as - a block - type: string - version: - description: Version is the Helm version to use - for templating ("3") - type: string - type: object - kustomize: - description: Kustomize holds kustomize specific options - properties: - commonAnnotations: - additionalProperties: - type: string - description: CommonAnnotations is a list of additional - annotations to add to rendered manifests - type: object - commonLabels: - additionalProperties: - type: string - description: CommonLabels is a list of additional - labels to add to rendered manifests - type: object - forceCommonAnnotations: - 
description: ForceCommonAnnotations specifies - whether to force applying common annotations - to resources for Kustomize apps - type: boolean - forceCommonLabels: - description: ForceCommonLabels specifies whether - to force applying common labels to resources - for Kustomize apps - type: boolean - images: - description: Images is a list of Kustomize image - override specifications - items: - description: KustomizeImage represents a Kustomize - image definition in the format [old_image_name=]: - type: string - type: array - namePrefix: - description: NamePrefix is a prefix appended to - resources for Kustomize apps - type: string - nameSuffix: - description: NameSuffix is a suffix appended to - resources for Kustomize apps - type: string - version: - description: Version controls which version of - Kustomize to use for rendering manifests - type: string - type: object - path: - description: Path is a directory path within the Git - repository, and is only valid for applications sourced - from Git. - type: string - plugin: - description: Plugin holds config management plugin - specific options - properties: - env: - description: Env is a list of environment variable - entries - items: - description: EnvEntry represents an entry in - the application's environment - properties: - name: - description: Name is the name of the variable, - usually expressed in uppercase - type: string - value: - description: Value is the value of the variable - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - description: RepoURL is the URL to the repository - (Git or Helm) that contains the application manifests - type: string - targetRevision: - description: TargetRevision defines the revision of - the source to sync the application to. In case of - Git, this can be commit, tag, or branch. If omitted, - will equal to HEAD. In case of Helm, this is a semver - tag for the Chart's version. 
- type: string - required: - - repoURL - type: object - syncOptions: - description: SyncOptions provide per-sync sync-options, - e.g. Validate=false - items: - type: string - type: array - syncStrategy: - description: SyncStrategy describes how to perform the - sync - properties: - apply: - description: Apply will perform a `kubectl apply` - to perform the sync. - properties: - force: - description: Force indicates whether or not to - supply the --force flag to `kubectl apply`. - The --force flag deletes and re-create the resource, - when PATCH encounters conflict and has retried - for 5 times. - type: boolean - type: object - hook: - description: Hook will submit any referenced resources - to perform the sync. This is the default strategy - properties: - force: - description: Force indicates whether or not to - supply the --force flag to `kubectl apply`. - The --force flag deletes and re-create the resource, - when PATCH encounters conflict and has retried - for 5 times. - type: boolean - type: object - type: object - type: object - type: object - phase: - description: Phase is the current phase of the operation - type: string - retryCount: - description: RetryCount contains time of operation retries - format: int64 - type: integer - startedAt: - description: StartedAt contains time of operation start - format: date-time - type: string - syncResult: - description: SyncResult is the result of a Sync operation - properties: - resources: - description: Resources contains a list of sync result items - for each individual resource in a sync operation - items: - description: ResourceResult holds the operation result details - of a specific resource - properties: - group: - description: Group specifies the API group of the resource - type: string - hookPhase: - description: HookPhase contains the state of any operation - associated with this resource OR hook This can also - contain values for non-hook resources. 
- type: string - hookType: - description: HookType specifies the type of the hook. - Empty for non-hook resources - type: string - kind: - description: Kind specifies the API kind of the resource - type: string - message: - description: Message contains an informational or error - message for the last sync OR operation - type: string - name: - description: Name specifies the name of the resource - type: string - namespace: - description: Namespace specifies the target namespace - of the resource - type: string - status: - description: Status holds the final result of the sync. - Will be empty if the resources is yet to be applied/pruned - and is always zero-value for hooks - type: string - syncPhase: - description: SyncPhase indicates the particular phase - of the sync that this result was acquired in - type: string - version: - description: Version specifies the API version of the - resource - type: string - required: - - group - - kind - - name - - namespace - - version - type: object - type: array - revision: - description: Revision holds the revision this sync operation - was performed to - type: string - source: - description: Source records the application source information - of the sync, used for comparing auto-sync - properties: - chart: - description: Chart is a Helm chart name, and must be specified - for applications sourced from a Helm repo. 
- type: string - directory: - description: Directory holds path/directory specific options - properties: - exclude: - description: Exclude contains a glob pattern to match - paths against that should be explicitly excluded - from being used during manifest generation - type: string - include: - description: Include contains a glob pattern to match - paths against that should be explicitly included - during manifest generation - type: string - jsonnet: - description: Jsonnet holds options specific to Jsonnet - properties: - extVars: - description: ExtVars is a list of Jsonnet External - Variables - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - description: Additional library search dirs - items: - type: string - type: array - tlas: - description: TLAS is a list of Jsonnet Top-level - Arguments - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - description: Recurse specifies whether to scan a directory - recursively for manifests - type: boolean - type: object - helm: - description: Helm holds helm specific options - properties: - fileParameters: - description: FileParameters are file parameters to - the helm template - items: - description: HelmFileParameter is a file parameter - that's passed to helm template during manifest - generation - properties: - name: - description: Name is the name of the Helm parameter - type: string - path: - description: Path is the path to the file containing - the values for the Helm parameter - type: string - type: object - type: array - ignoreMissingValueFiles: - description: 
IgnoreMissingValueFiles prevents helm - template from failing when valueFiles do not exist - locally by not appending them to helm template --values - type: boolean - parameters: - description: Parameters is a list of Helm parameters - which are passed to the helm template command upon - manifest generation - items: - description: HelmParameter is a parameter that's - passed to helm template during manifest generation - properties: - forceString: - description: ForceString determines whether - to tell Helm to interpret booleans and numbers - as strings - type: boolean - name: - description: Name is the name of the Helm parameter - type: string - value: - description: Value is the value for the Helm - parameter - type: string - type: object - type: array - passCredentials: - description: PassCredentials pass credentials to all - domains (Helm's --pass-credentials) - type: boolean - releaseName: - description: ReleaseName is the Helm release name - to use. If omitted it will use the application name - type: string - skipCrds: - description: SkipCrds skips custom resource definition - installation step (Helm's --skip-crds) - type: boolean - valueFiles: - description: ValuesFiles is a list of Helm value files - to use when generating a template - items: - type: string - type: array - values: - description: Values specifies Helm values to be passed - to helm template, typically defined as a block - type: string - version: - description: Version is the Helm version to use for - templating ("3") - type: string - type: object - kustomize: - description: Kustomize holds kustomize specific options - properties: - commonAnnotations: - additionalProperties: - type: string - description: CommonAnnotations is a list of additional - annotations to add to rendered manifests - type: object - commonLabels: - additionalProperties: - type: string - description: CommonLabels is a list of additional - labels to add to rendered manifests - type: object - forceCommonAnnotations: - 
description: ForceCommonAnnotations specifies whether - to force applying common annotations to resources - for Kustomize apps - type: boolean - forceCommonLabels: - description: ForceCommonLabels specifies whether to - force applying common labels to resources for Kustomize - apps - type: boolean - images: - description: Images is a list of Kustomize image override - specifications - items: - description: KustomizeImage represents a Kustomize - image definition in the format [old_image_name=]: - type: string - type: array - namePrefix: - description: NamePrefix is a prefix appended to resources - for Kustomize apps - type: string - nameSuffix: - description: NameSuffix is a suffix appended to resources - for Kustomize apps - type: string - version: - description: Version controls which version of Kustomize - to use for rendering manifests - type: string - type: object - path: - description: Path is a directory path within the Git repository, - and is only valid for applications sourced from Git. - type: string - plugin: - description: Plugin holds config management plugin specific - options - properties: - env: - description: Env is a list of environment variable - entries - items: - description: EnvEntry represents an entry in the - application's environment - properties: - name: - description: Name is the name of the variable, - usually expressed in uppercase - type: string - value: - description: Value is the value of the variable - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - description: RepoURL is the URL to the repository (Git - or Helm) that contains the application manifests - type: string - targetRevision: - description: TargetRevision defines the revision of the - source to sync the application to. In case of Git, this - can be commit, tag, or branch. If omitted, will equal - to HEAD. In case of Helm, this is a semver tag for the - Chart's version. 
- type: string - required: - - repoURL - type: object - required: - - revision - type: object - required: - - operation - - phase - - startedAt - type: object - reconciledAt: - description: ReconciledAt indicates when the application state was - reconciled using the latest git version - format: date-time - type: string - resourceHealthSource: - description: 'ResourceHealthSource indicates where the resource health - status is stored: inline if not set or appTree' - type: string - resources: - description: Resources is a list of Kubernetes resources managed by - this application - items: - description: 'ResourceStatus holds the current sync and health status - of a resource TODO: describe members of this type' - properties: - group: - type: string - health: - description: HealthStatus contains information about the currently - observed health state of an application or resource - properties: - message: - description: Message is a human-readable informational message - describing the health status - type: string - status: - description: Status holds the status code of the application - or resource - type: string - type: object - hook: - type: boolean - kind: - type: string - name: - type: string - namespace: - type: string - requiresPruning: - type: boolean - status: - description: SyncStatusCode is a type which represents possible - comparison results - type: string - syncWave: - format: int64 - type: integer - version: - type: string - type: object - type: array - sourceType: - description: SourceType specifies the type of this application - type: string - summary: - description: Summary contains a list of URLs and container images - used by this application - properties: - externalURLs: - description: ExternalURLs holds all external URLs of application - child resources. - items: - type: string - type: array - images: - description: Images holds all images of application child resources. 
- items: - type: string - type: array - type: object - sync: - description: Sync contains information about the application's current - sync status - properties: - comparedTo: - description: ComparedTo contains information about what has been - compared - properties: - destination: - description: Destination is a reference to the application's - destination used for comparison - properties: - name: - description: Name is an alternate way of specifying the - target cluster by its symbolic name - type: string - namespace: - description: Namespace specifies the target namespace - for the application's resources. The namespace will - only be set for namespace-scoped resources that have - not set a value for .metadata.namespace - type: string - server: - description: Server specifies the URL of the target cluster - and must be set to the Kubernetes control plane API - type: string - type: object - source: - description: Source is a reference to the application's source - used for comparison - properties: - chart: - description: Chart is a Helm chart name, and must be specified - for applications sourced from a Helm repo. 
- type: string - directory: - description: Directory holds path/directory specific options - properties: - exclude: - description: Exclude contains a glob pattern to match - paths against that should be explicitly excluded - from being used during manifest generation - type: string - include: - description: Include contains a glob pattern to match - paths against that should be explicitly included - during manifest generation - type: string - jsonnet: - description: Jsonnet holds options specific to Jsonnet - properties: - extVars: - description: ExtVars is a list of Jsonnet External - Variables - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - description: Additional library search dirs - items: - type: string - type: array - tlas: - description: TLAS is a list of Jsonnet Top-level - Arguments - items: - description: JsonnetVar represents a variable - to be passed to jsonnet during manifest generation - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - description: Recurse specifies whether to scan a directory - recursively for manifests - type: boolean - type: object - helm: - description: Helm holds helm specific options - properties: - fileParameters: - description: FileParameters are file parameters to - the helm template - items: - description: HelmFileParameter is a file parameter - that's passed to helm template during manifest - generation - properties: - name: - description: Name is the name of the Helm parameter - type: string - path: - description: Path is the path to the file containing - the values for the Helm parameter - type: string - type: object - type: array - ignoreMissingValueFiles: - description: 
IgnoreMissingValueFiles prevents helm - template from failing when valueFiles do not exist - locally by not appending them to helm template --values - type: boolean - parameters: - description: Parameters is a list of Helm parameters - which are passed to the helm template command upon - manifest generation - items: - description: HelmParameter is a parameter that's - passed to helm template during manifest generation - properties: - forceString: - description: ForceString determines whether - to tell Helm to interpret booleans and numbers - as strings - type: boolean - name: - description: Name is the name of the Helm parameter - type: string - value: - description: Value is the value for the Helm - parameter - type: string - type: object - type: array - passCredentials: - description: PassCredentials pass credentials to all - domains (Helm's --pass-credentials) - type: boolean - releaseName: - description: ReleaseName is the Helm release name - to use. If omitted it will use the application name - type: string - skipCrds: - description: SkipCrds skips custom resource definition - installation step (Helm's --skip-crds) - type: boolean - valueFiles: - description: ValuesFiles is a list of Helm value files - to use when generating a template - items: - type: string - type: array - values: - description: Values specifies Helm values to be passed - to helm template, typically defined as a block - type: string - version: - description: Version is the Helm version to use for - templating ("3") - type: string - type: object - kustomize: - description: Kustomize holds kustomize specific options - properties: - commonAnnotations: - additionalProperties: - type: string - description: CommonAnnotations is a list of additional - annotations to add to rendered manifests - type: object - commonLabels: - additionalProperties: - type: string - description: CommonLabels is a list of additional - labels to add to rendered manifests - type: object - forceCommonAnnotations: - 
description: ForceCommonAnnotations specifies whether - to force applying common annotations to resources - for Kustomize apps - type: boolean - forceCommonLabels: - description: ForceCommonLabels specifies whether to - force applying common labels to resources for Kustomize - apps - type: boolean - images: - description: Images is a list of Kustomize image override - specifications - items: - description: KustomizeImage represents a Kustomize - image definition in the format [old_image_name=]: - type: string - type: array - namePrefix: - description: NamePrefix is a prefix appended to resources - for Kustomize apps - type: string - nameSuffix: - description: NameSuffix is a suffix appended to resources - for Kustomize apps - type: string - version: - description: Version controls which version of Kustomize - to use for rendering manifests - type: string - type: object - path: - description: Path is a directory path within the Git repository, - and is only valid for applications sourced from Git. - type: string - plugin: - description: Plugin holds config management plugin specific - options - properties: - env: - description: Env is a list of environment variable - entries - items: - description: EnvEntry represents an entry in the - application's environment - properties: - name: - description: Name is the name of the variable, - usually expressed in uppercase - type: string - value: - description: Value is the value of the variable - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - description: RepoURL is the URL to the repository (Git - or Helm) that contains the application manifests - type: string - targetRevision: - description: TargetRevision defines the revision of the - source to sync the application to. In case of Git, this - can be commit, tag, or branch. If omitted, will equal - to HEAD. In case of Helm, this is a semver tag for the - Chart's version. 
- type: string - required: - - repoURL - type: object - required: - - destination - - source - type: object - revision: - description: Revision contains information about the revision - the comparison has been performed to - type: string - status: - description: Status is the sync state of the comparison - type: string - required: - - status - type: object - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/name: applicationsets.argoproj.io - app.kubernetes.io/part-of: argocd - name: applicationsets.argoproj.io -spec: - group: argoproj.io - names: - kind: ApplicationSet - listKind: ApplicationSetList - plural: applicationsets - shortNames: - - appset - - appsets - singular: applicationset - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - generators: - items: - properties: - clusterDecisionResource: - properties: - configMapRef: - type: string - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - name: - type: string - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: 
string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - 
type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - values: - additionalProperties: - type: string - type: object - required: - - configMapRef - type: object - clusters: - properties: - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: 
string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - 
path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - values: - additionalProperties: - type: string - type: object - type: object - git: - properties: - directories: - items: - properties: - exclude: - type: boolean - path: - type: string - required: - - path - type: object - type: array - files: - items: - properties: - path: - type: string - required: - - path - type: object - type: array - repoURL: - type: string - requeueAfterSeconds: - format: int64 - type: integer - revision: - type: string - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - 
items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - 
required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - repoURL - - revision - type: object - list: - properties: - elements: - items: - x-kubernetes-preserve-unknown-fields: true - type: array - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - 
properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: 
string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - elements - type: object - matrix: - properties: - generators: - items: - properties: - clusterDecisionResource: - properties: - configMapRef: - type: string - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - name: - type: string - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - 
properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: 
string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - values: - additionalProperties: - type: string - type: object - required: - - configMapRef - type: object - clusters: - properties: - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - 
code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: 
integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - values: - additionalProperties: - type: string - type: object - type: object - git: - properties: - directories: - items: - properties: - exclude: - type: boolean - path: - type: string - required: - - path - type: object - type: array - files: - items: - properties: - path: - type: string - required: - - path - type: object - type: array - repoURL: - type: string - requeueAfterSeconds: - format: int64 - type: integer - revision: - type: string - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - 
type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - 
- project - - source - type: object - required: - - metadata - - spec - type: object - required: - - repoURL - - revision - type: object - list: - properties: - elements: - items: - x-kubernetes-preserve-unknown-fields: true - type: array - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - 
ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - elements - type: object - matrix: - x-kubernetes-preserve-unknown-fields: true - merge: - x-kubernetes-preserve-unknown-fields: true - pullRequest: - properties: - bitbucketServer: - properties: - api: - type: string - basicAuth: - properties: - passwordRef: - properties: - key: - type: string - secretName: - 
type: string - required: - - key - - secretName - type: object - username: - type: string - required: - - passwordRef - - username - type: object - project: - type: string - repo: - type: string - required: - - api - - project - - repo - type: object - filters: - items: - properties: - branchMatch: - type: string - type: object - type: array - gitea: - properties: - api: - type: string - insecure: - type: boolean - owner: - type: string - repo: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - api - - owner - - repo - type: object - github: - properties: - api: - type: string - appSecretName: - type: string - labels: - items: - type: string - type: array - owner: - type: string - repo: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - owner - - repo - type: object - gitlab: - properties: - api: - type: string - labels: - items: - type: string - type: array - project: - type: string - pullRequestState: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - project - type: object - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - 
kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - 
name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - type: object - scmProvider: - properties: - azureDevOps: - properties: - accessTokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - allBranches: - type: boolean - api: - type: string - organization: - type: string - teamProject: - type: string - required: - - accessTokenRef - - organization - - teamProject - type: object - bitbucket: - properties: - allBranches: - type: boolean - appPasswordRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - owner: - type: string - user: - type: string - required: - - appPasswordRef - - owner - - user - type: object - bitbucketServer: - properties: - allBranches: - type: boolean - api: - type: string - basicAuth: - properties: - passwordRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - username: - type: string - required: - - passwordRef - - username - type: object - project: - type: string - required: - - api - - project - type: object - cloneProtocol: - type: string - filters: - items: - properties: - branchMatch: - type: string - 
labelMatch: - type: string - pathsDoNotExist: - items: - type: string - type: array - pathsExist: - items: - type: string - type: array - repositoryMatch: - type: string - type: object - type: array - gitea: - properties: - allBranches: - type: boolean - api: - type: string - insecure: - type: boolean - owner: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - api - - owner - type: object - github: - properties: - allBranches: - type: boolean - api: - type: string - appSecretName: - type: string - organization: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - organization - type: object - gitlab: - properties: - allBranches: - type: boolean - api: - type: string - group: - type: string - includeSubgroups: - type: boolean - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - group - type: object - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - 
properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - 
required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - type: object - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - type: object - type: array - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - 
properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: 
- backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - generators - type: object - merge: - properties: - generators: - items: - properties: - clusterDecisionResource: - properties: - configMapRef: - type: string - labelSelector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - name: - type: string - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - 
properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: 
- backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - values: - additionalProperties: - type: string - type: object - required: - - configMapRef - type: object - clusters: - properties: - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - 
properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - 
type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - values: - additionalProperties: - type: string - type: object - type: object - git: - properties: - directories: - items: - properties: - exclude: - type: boolean - path: - type: string - required: - - path - type: object - type: array - files: - items: - properties: - path: - type: string - required: - - path - type: object - type: array - repoURL: - type: string - requeueAfterSeconds: - format: int64 - type: integer - revision: - type: string - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - 
required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: 
array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - repoURL - - revision - type: object - list: - properties: - elements: - items: - x-kubernetes-preserve-unknown-fields: true - type: array - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: 
- type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - elements - type: object - matrix: - x-kubernetes-preserve-unknown-fields: true - merge: - x-kubernetes-preserve-unknown-fields: true - pullRequest: - properties: - bitbucketServer: - properties: - api: - type: string - basicAuth: - properties: - passwordRef: - 
properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - username: - type: string - required: - - passwordRef - - username - type: object - project: - type: string - repo: - type: string - required: - - api - - project - - repo - type: object - filters: - items: - properties: - branchMatch: - type: string - type: object - type: array - gitea: - properties: - api: - type: string - insecure: - type: boolean - owner: - type: string - repo: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - api - - owner - - repo - type: object - github: - properties: - api: - type: string - appSecretName: - type: string - labels: - items: - type: string - type: array - owner: - type: string - repo: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - owner - - repo - type: object - gitlab: - properties: - api: - type: string - labels: - items: - type: string - type: array - project: - type: string - pullRequestState: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - project - type: object - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - 
jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - 
plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - type: object - scmProvider: - properties: - azureDevOps: - properties: - accessTokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - allBranches: - type: boolean - api: - type: string - organization: - type: string - teamProject: - type: string - required: - - accessTokenRef - - organization - - teamProject - type: object - bitbucket: - properties: - allBranches: - type: boolean - appPasswordRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - owner: - type: string - user: - type: string - required: - - appPasswordRef - - owner - - user - type: object - bitbucketServer: - properties: - allBranches: - type: boolean - api: - type: string - basicAuth: - properties: - passwordRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - username: - type: string - required: - - passwordRef - - username - type: object - project: - type: string - required: - - api - - project - type: object - cloneProtocol: - type: string - filters: - 
items: - properties: - branchMatch: - type: string - labelMatch: - type: string - pathsDoNotExist: - items: - type: string - type: array - pathsExist: - items: - type: string - type: array - repositoryMatch: - type: string - type: object - type: array - gitea: - properties: - allBranches: - type: boolean - api: - type: string - insecure: - type: boolean - owner: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - api - - owner - type: object - github: - properties: - allBranches: - type: boolean - api: - type: string - appSecretName: - type: string - organization: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - organization - type: object - gitlab: - properties: - allBranches: - type: boolean - api: - type: string - group: - type: string - includeSubgroups: - type: boolean - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - group - type: object - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - 
kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - 
repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - type: object - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - type: object - type: array - mergeKeys: - items: - type: string - type: array - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - 
type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: 
- type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - generators - - mergeKeys - type: object - pullRequest: - properties: - bitbucketServer: - properties: - api: - type: string - basicAuth: - properties: - passwordRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - username: - type: string - required: - - passwordRef - - username - type: object - project: - type: string - repo: - type: string - required: - - api - - project - - repo - type: object - filters: - items: - properties: - branchMatch: - type: string - type: object - type: array - gitea: - properties: - api: - type: string - insecure: - type: boolean - owner: - type: string - repo: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - api - - owner - - repo - type: object - github: - properties: - api: - type: string - appSecretName: - type: string - labels: - items: - type: string - type: array - owner: - type: string - repo: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - owner - - repo - type: object - gitlab: - properties: - api: - type: string - labels: - items: - type: string - type: array - project: - type: string - pullRequestState: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - 
required: - - project - type: object - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: 
string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - type: object - scmProvider: - properties: - azureDevOps: - properties: - accessTokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - allBranches: - type: boolean - api: - type: string - organization: - type: string - teamProject: - type: string - required: - - accessTokenRef - - organization - - teamProject - type: object - bitbucket: - properties: - allBranches: - type: boolean - appPasswordRef: - 
properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - owner: - type: string - user: - type: string - required: - - appPasswordRef - - owner - - user - type: object - bitbucketServer: - properties: - allBranches: - type: boolean - api: - type: string - basicAuth: - properties: - passwordRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - username: - type: string - required: - - passwordRef - - username - type: object - project: - type: string - required: - - api - - project - type: object - cloneProtocol: - type: string - filters: - items: - properties: - branchMatch: - type: string - labelMatch: - type: string - pathsDoNotExist: - items: - type: string - type: array - pathsExist: - items: - type: string - type: array - repositoryMatch: - type: string - type: object - type: array - gitea: - properties: - allBranches: - type: boolean - api: - type: string - insecure: - type: boolean - owner: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - api - - owner - type: object - github: - properties: - allBranches: - type: boolean - api: - type: string - appSecretName: - type: string - organization: - type: string - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - organization - type: object - gitlab: - properties: - allBranches: - type: boolean - api: - type: string - group: - type: string - includeSubgroups: - type: boolean - tokenRef: - properties: - key: - type: string - secretName: - type: string - required: - - key - - secretName - type: object - required: - - group - type: object - requeueAfterSeconds: - format: int64 - type: integer - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - 
type: object - finalizers: - items: - type: string - type: array - labels: - additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: 
string - version: - type: string - type: object - kustomize: - properties: - commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - type: object - selector: - properties: - matchExpressions: - items: - properties: - key: - type: string - operator: - type: string - values: - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - type: object - type: object - type: object - type: array - goTemplate: - type: boolean - syncPolicy: - properties: - preserveResourcesOnDeletion: - type: boolean - type: object - template: - properties: - metadata: - properties: - annotations: - additionalProperties: - type: string - type: object - finalizers: - items: - type: string - type: array - labels: - 
additionalProperties: - type: string - type: object - name: - type: string - namespace: - type: string - type: object - spec: - properties: - destination: - properties: - name: - type: string - namespace: - type: string - server: - type: string - type: object - ignoreDifferences: - items: - properties: - group: - type: string - jqPathExpressions: - items: - type: string - type: array - jsonPointers: - items: - type: string - type: array - kind: - type: string - managedFieldsManagers: - items: - type: string - type: array - name: - type: string - namespace: - type: string - required: - - kind - type: object - type: array - info: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - project: - type: string - revisionHistoryLimit: - format: int64 - type: integer - source: - properties: - chart: - type: string - directory: - properties: - exclude: - type: string - include: - type: string - jsonnet: - properties: - extVars: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - libs: - items: - type: string - type: array - tlas: - items: - properties: - code: - type: boolean - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - type: object - recurse: - type: boolean - type: object - helm: - properties: - fileParameters: - items: - properties: - name: - type: string - path: - type: string - type: object - type: array - ignoreMissingValueFiles: - type: boolean - parameters: - items: - properties: - forceString: - type: boolean - name: - type: string - value: - type: string - type: object - type: array - passCredentials: - type: boolean - releaseName: - type: string - skipCrds: - type: boolean - valueFiles: - items: - type: string - type: array - values: - type: string - version: - type: string - type: object - kustomize: - properties: - 
commonAnnotations: - additionalProperties: - type: string - type: object - commonLabels: - additionalProperties: - type: string - type: object - forceCommonAnnotations: - type: boolean - forceCommonLabels: - type: boolean - images: - items: - type: string - type: array - namePrefix: - type: string - nameSuffix: - type: string - version: - type: string - type: object - path: - type: string - plugin: - properties: - env: - items: - properties: - name: - type: string - value: - type: string - required: - - name - - value - type: object - type: array - name: - type: string - type: object - repoURL: - type: string - targetRevision: - type: string - required: - - repoURL - type: object - syncPolicy: - properties: - automated: - properties: - allowEmpty: - type: boolean - prune: - type: boolean - selfHeal: - type: boolean - type: object - retry: - properties: - backoff: - properties: - duration: - type: string - factor: - format: int64 - type: integer - maxDuration: - type: string - type: object - limit: - format: int64 - type: integer - type: object - syncOptions: - items: - type: string - type: array - type: object - required: - - destination - - project - - source - type: object - required: - - metadata - - spec - type: object - required: - - generators - - template - type: object - status: - properties: - conditions: - items: - properties: - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - type: string - required: - - message - - reason - - status - - type - type: object - type: array - type: object - required: - - metadata - - spec - type: object - served: true - storage: true - subresources: - status: {} ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/name: appprojects.argoproj.io - app.kubernetes.io/part-of: argocd - name: appprojects.argoproj.io -spec: - group: argoproj.io - names: - kind: AppProject - 
listKind: AppProjectList - plural: appprojects - shortNames: - - appproj - - appprojs - singular: appproject - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: 'AppProject provides a logical grouping of applications, providing - controls for: * where the apps may deploy to (cluster whitelist) * what - may be deployed (repository whitelist, resource whitelist/blacklist) * who - can access these applications (roles, OIDC group claims bindings) * and - what they can do (RBAC policies) * automation access to these roles (JWT - tokens)' - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: AppProjectSpec is the specification of an AppProject - properties: - clusterResourceBlacklist: - description: ClusterResourceBlacklist contains list of blacklisted - cluster level resources - items: - description: GroupKind specifies a Group and a Kind, but does not - force a version. 
This is useful for identifying concepts during - lookup stages without having partially valid types - properties: - group: - type: string - kind: - type: string - required: - - group - - kind - type: object - type: array - clusterResourceWhitelist: - description: ClusterResourceWhitelist contains list of whitelisted - cluster level resources - items: - description: GroupKind specifies a Group and a Kind, but does not - force a version. This is useful for identifying concepts during - lookup stages without having partially valid types - properties: - group: - type: string - kind: - type: string - required: - - group - - kind - type: object - type: array - description: - description: Description contains optional project description - type: string - destinations: - description: Destinations contains list of destinations available - for deployment - items: - description: ApplicationDestination holds information about the - application's destination - properties: - name: - description: Name is an alternate way of specifying the target - cluster by its symbolic name - type: string - namespace: - description: Namespace specifies the target namespace for the - application's resources. The namespace will only be set for - namespace-scoped resources that have not set a value for .metadata.namespace - type: string - server: - description: Server specifies the URL of the target cluster - and must be set to the Kubernetes control plane API - type: string - type: object - type: array - namespaceResourceBlacklist: - description: NamespaceResourceBlacklist contains list of blacklisted - namespace level resources - items: - description: GroupKind specifies a Group and a Kind, but does not - force a version. 
This is useful for identifying concepts during - lookup stages without having partially valid types - properties: - group: - type: string - kind: - type: string - required: - - group - - kind - type: object - type: array - namespaceResourceWhitelist: - description: NamespaceResourceWhitelist contains list of whitelisted - namespace level resources - items: - description: GroupKind specifies a Group and a Kind, but does not - force a version. This is useful for identifying concepts during - lookup stages without having partially valid types - properties: - group: - type: string - kind: - type: string - required: - - group - - kind - type: object - type: array - orphanedResources: - description: OrphanedResources specifies if controller should monitor - orphaned resources of apps in this project - properties: - ignore: - description: Ignore contains a list of resources that are to be - excluded from orphaned resources monitoring - items: - description: OrphanedResourceKey is a reference to a resource - to be ignored from - properties: - group: - type: string - kind: - type: string - name: - type: string - type: object - type: array - warn: - description: Warn indicates if warning condition should be created - for apps which have orphaned resources - type: boolean - type: object - permitOnlyProjectScopedClusters: - description: PermitOnlyProjectScopedClusters determines whether destinations - can only reference clusters which are project-scoped - type: boolean - roles: - description: Roles are user defined RBAC roles associated with this - project - items: - description: ProjectRole represents a role that has access to a - project - properties: - description: - description: Description is a description of the role - type: string - groups: - description: Groups are a list of OIDC group claims bound to - this role - items: - type: string - type: array - jwtTokens: - description: JWTTokens are a list of generated JWT tokens bound - to this role - items: - description: 
JWTToken holds the issuedAt and expiresAt values - of a token - properties: - exp: - format: int64 - type: integer - iat: - format: int64 - type: integer - id: - type: string - required: - - iat - type: object - type: array - name: - description: Name is a name for this role - type: string - policies: - description: Policies Stores a list of casbin formatted strings - that define access policies for the role in the project - items: - type: string - type: array - required: - - name - type: object - type: array - signatureKeys: - description: SignatureKeys contains a list of PGP key IDs that commits - in Git must be signed with in order to be allowed for sync - items: - description: SignatureKey is the specification of a key required - to verify commit signatures with - properties: - keyID: - description: The ID of the key in hexadecimal notation - type: string - required: - - keyID - type: object - type: array - sourceNamespaces: - description: SourceNamespaces defines the namespaces application resources - are allowed to be created in - items: - type: string - type: array - sourceRepos: - description: SourceRepos contains list of repository URLs which can - be used for deployment - items: - type: string - type: array - syncWindows: - description: SyncWindows controls when syncs can be run for apps in - this project - items: - description: SyncWindow contains the kind, time, duration and attributes - that are used to assign the syncWindows to apps - properties: - applications: - description: Applications contains a list of applications that - the window will apply to - items: - type: string - type: array - clusters: - description: Clusters contains a list of clusters that the window - will apply to - items: - type: string - type: array - duration: - description: Duration is the amount of time the sync window - will be open - type: string - kind: - description: Kind defines if the window allows or blocks syncs - type: string - manualSync: - description: ManualSync 
enables manual syncs when they would - otherwise be blocked - type: boolean - namespaces: - description: Namespaces contains a list of namespaces that the - window will apply to - items: - type: string - type: array - schedule: - description: Schedule is the time the window will begin, specified - in cron format - type: string - timeZone: - description: TimeZone of the sync that will be applied to the - schedule - type: string - type: object - type: array - type: object - status: - description: AppProjectStatus contains status information for AppProject - CRs - properties: - jwtTokensByRole: - additionalProperties: - description: JWTTokens represents a list of JWT tokens - properties: - items: - items: - description: JWTToken holds the issuedAt and expiresAt values - of a token - properties: - exp: - format: int64 - type: integer - iat: - format: int64 - type: integer - id: - type: string - required: - - iat - type: object - type: array - type: object - description: JWTTokensByRole contains a list of JWT tokens issued - for a given role - type: object - type: object - required: - - metadata - - spec - type: object - served: true - storage: true ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: application-controller - app.kubernetes.io/name: argocd-application-controller - app.kubernetes.io/part-of: argocd - name: argocd-application-controller ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/name: argocd-applicationset-controller - app.kubernetes.io/part-of: argocd-applicationset - name: argocd-applicationset-controller ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: dex-server - app.kubernetes.io/name: argocd-dex-server - app.kubernetes.io/part-of: argocd - name: argocd-dex-server ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: notifications-controller 
- app.kubernetes.io/name: argocd-notifications-controller - app.kubernetes.io/part-of: argocd - name: argocd-notifications-controller ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: redis - app.kubernetes.io/name: argocd-redis - app.kubernetes.io/part-of: argocd - name: argocd-redis ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: repo-server - app.kubernetes.io/name: argocd-repo-server - app.kubernetes.io/part-of: argocd - name: argocd-repo-server ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server - app.kubernetes.io/part-of: argocd - name: argocd-server ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: application-controller - app.kubernetes.io/name: argocd-application-controller - app.kubernetes.io/part-of: argocd - name: argocd-application-controller -rules: -- apiGroups: - - "" - resources: - - secrets - - configmaps - verbs: - - get - - list - - watch -- apiGroups: - - argoproj.io - resources: - - applications - - appprojects - verbs: - - create - - get - - list - - watch - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/name: argocd-applicationset-controller - app.kubernetes.io/part-of: argocd-applicationset - name: argocd-applicationset-controller -rules: -- apiGroups: - - argoproj.io - resources: - - applications - - applicationsets - - applicationsets/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - argoproj.io - resources: - - appprojects - verbs: - - get -- apiGroups: - - argoproj.io - resources: - - applicationsets/status - verbs: - - get - - patch - - update 
-- apiGroups: - - "" - resources: - - events - verbs: - - create - - get - - list - - patch - - watch -- apiGroups: - - "" - resources: - - secrets - - configmaps - verbs: - - get - - list - - watch -- apiGroups: - - apps - - extensions - resources: - - deployments - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: dex-server - app.kubernetes.io/name: argocd-dex-server - app.kubernetes.io/part-of: argocd - name: argocd-dex-server -rules: -- apiGroups: - - "" - resources: - - secrets - - configmaps - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: argocd-notifications-controller -rules: -- apiGroups: - - argoproj.io - resources: - - applications - - appprojects - verbs: - - get - - list - - watch - - update - - patch -- apiGroups: - - "" - resources: - - configmaps - - secrets - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - argocd-notifications-cm - resources: - - configmaps - verbs: - - get -- apiGroups: - - "" - resourceNames: - - argocd-notifications-secret - resources: - - secrets - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server - app.kubernetes.io/part-of: argocd - name: argocd-server -rules: -- apiGroups: - - "" - resources: - - secrets - - configmaps - verbs: - - create - - get - - list - - watch - - update - - patch - - delete -- apiGroups: - - argoproj.io - resources: - - applications - - appprojects - - applicationsets - verbs: - - create - - get - - list - - watch - - update - - delete - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - create - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: application-controller - app.kubernetes.io/name: 
argocd-application-controller - app.kubernetes.io/part-of: argocd - name: argocd-application-controller -rules: -- apiGroups: - - '*' - resources: - - '*' - verbs: - - '*' -- nonResourceURLs: - - '*' - verbs: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server - app.kubernetes.io/part-of: argocd - name: argocd-server -rules: -- apiGroups: - - '*' - resources: - - '*' - verbs: - - delete - - get - - patch -- apiGroups: - - "" - resources: - - events - verbs: - - list -- apiGroups: - - "" - resources: - - pods - - pods/log - verbs: - - get -- apiGroups: - - argoproj.io - resources: - - applications - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: application-controller - app.kubernetes.io/name: argocd-application-controller - app.kubernetes.io/part-of: argocd - name: argocd-application-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: argocd-application-controller -subjects: -- kind: ServiceAccount - name: argocd-application-controller ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/name: argocd-applicationset-controller - app.kubernetes.io/part-of: argocd-applicationset - name: argocd-applicationset-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: argocd-applicationset-controller -subjects: -- kind: ServiceAccount - name: argocd-applicationset-controller ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: dex-server - app.kubernetes.io/name: argocd-dex-server - app.kubernetes.io/part-of: argocd - name: argocd-dex-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: argocd-dex-server -subjects: -- 
kind: ServiceAccount - name: argocd-dex-server ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: argocd-notifications-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: argocd-notifications-controller -subjects: -- kind: ServiceAccount - name: argocd-notifications-controller ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: redis - app.kubernetes.io/name: argocd-redis - app.kubernetes.io/part-of: argocd - name: argocd-redis -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: argocd-redis -subjects: -- kind: ServiceAccount - name: argocd-redis ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server - app.kubernetes.io/part-of: argocd - name: argocd-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: argocd-server -subjects: -- kind: ServiceAccount - name: argocd-server ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: application-controller - app.kubernetes.io/name: argocd-application-controller - app.kubernetes.io/part-of: argocd - name: argocd-application-controller -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: argocd-application-controller -subjects: -- kind: ServiceAccount - name: argocd-application-controller - namespace: argocd ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server - app.kubernetes.io/part-of: argocd - name: argocd-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: argocd-server -subjects: -- kind: ServiceAccount - name: argocd-server - namespace: argocd ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - 
app.kubernetes.io/name: argocd-cm - app.kubernetes.io/part-of: argocd - name: argocd-cm ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/name: argocd-cmd-params-cm - app.kubernetes.io/part-of: argocd - name: argocd-cmd-params-cm ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/name: argocd-gpg-keys-cm - app.kubernetes.io/part-of: argocd - name: argocd-gpg-keys-cm ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: argocd-notifications-cm ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/name: argocd-rbac-cm - app.kubernetes.io/part-of: argocd - name: argocd-rbac-cm ---- -apiVersion: v1 -data: - ssh_known_hosts: |- - bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== - github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== - gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= - gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf - gitlab.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 - ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H - vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H - github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= - github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/name: argocd-ssh-known-hosts-cm - app.kubernetes.io/part-of: argocd - name: argocd-ssh-known-hosts-cm ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - app.kubernetes.io/name: argocd-tls-certs-cm - app.kubernetes.io/part-of: argocd - name: argocd-tls-certs-cm ---- -apiVersion: v1 -kind: Secret -metadata: - name: argocd-notifications-secret -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - app.kubernetes.io/name: argocd-secret - app.kubernetes.io/part-of: 
argocd - name: argocd-secret -type: Opaque ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/name: argocd-applicationset-controller - app.kubernetes.io/part-of: argocd-applicationset - name: argocd-applicationset-controller -spec: - ports: - - name: webhook - port: 7000 - protocol: TCP - targetPort: webhook - - name: metrics - port: 8080 - protocol: TCP - targetPort: metrics - selector: - app.kubernetes.io/name: argocd-applicationset-controller ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: dex-server - app.kubernetes.io/name: argocd-dex-server - app.kubernetes.io/part-of: argocd - name: argocd-dex-server -spec: - ports: - - name: http - port: 5556 - protocol: TCP - targetPort: 5556 - - name: grpc - port: 5557 - protocol: TCP - targetPort: 5557 - - name: metrics - port: 5558 - protocol: TCP - targetPort: 5558 - selector: - app.kubernetes.io/name: argocd-dex-server ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: metrics - app.kubernetes.io/name: argocd-metrics - app.kubernetes.io/part-of: argocd - name: argocd-metrics -spec: - ports: - - name: metrics - port: 8082 - protocol: TCP - targetPort: 8082 - selector: - app.kubernetes.io/name: argocd-application-controller ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/name: argocd-notifications-controller-metrics - name: argocd-notifications-controller-metrics -spec: - ports: - - name: metrics - port: 9001 - protocol: TCP - targetPort: 9001 - selector: - app.kubernetes.io/name: argocd-notifications-controller ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: redis - app.kubernetes.io/name: argocd-redis - app.kubernetes.io/part-of: argocd - name: argocd-redis -spec: - ports: - - name: tcp-redis - port: 6379 - targetPort: 6379 - selector: - app.kubernetes.io/name: argocd-redis ---- -apiVersion: v1 -kind: 
Service -metadata: - labels: - app.kubernetes.io/component: repo-server - app.kubernetes.io/name: argocd-repo-server - app.kubernetes.io/part-of: argocd - name: argocd-repo-server -spec: - ports: - - name: server - port: 8081 - protocol: TCP - targetPort: 8081 - - name: metrics - port: 8084 - protocol: TCP - targetPort: 8084 - selector: - app.kubernetes.io/name: argocd-repo-server ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server - app.kubernetes.io/part-of: argocd - name: argocd-server -spec: - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 8080 - - name: https - port: 443 - protocol: TCP - targetPort: 8080 - selector: - app.kubernetes.io/name: argocd-server ---- -apiVersion: v1 -kind: Service -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server-metrics - app.kubernetes.io/part-of: argocd - name: argocd-server-metrics -spec: - ports: - - name: metrics - port: 8083 - protocol: TCP - targetPort: 8083 - selector: - app.kubernetes.io/name: argocd-server ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/name: argocd-applicationset-controller - app.kubernetes.io/part-of: argocd-applicationset - name: argocd-applicationset-controller -spec: - selector: - matchLabels: - app.kubernetes.io/name: argocd-applicationset-controller - template: - metadata: - labels: - app.kubernetes.io/name: argocd-applicationset-controller - spec: - containers: - - command: - - entrypoint.sh - - argocd-applicationset-controller - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/argoproj/argocd:v2.5.3 - imagePullPolicy: Always - name: argocd-applicationset-controller - ports: - - containerPort: 7000 - name: webhook - - containerPort: 8080 - name: metrics - securityContext: - allowPrivilegeEscalation: false - capabilities: - 
drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /app/config/ssh - name: ssh-known-hosts - - mountPath: /app/config/tls - name: tls-certs - - mountPath: /app/config/gpg/source - name: gpg-keys - - mountPath: /app/config/gpg/keys - name: gpg-keyring - - mountPath: /tmp - name: tmp - serviceAccountName: argocd-applicationset-controller - volumes: - - configMap: - name: argocd-ssh-known-hosts-cm - name: ssh-known-hosts - - configMap: - name: argocd-tls-certs-cm - name: tls-certs - - configMap: - name: argocd-gpg-keys-cm - name: gpg-keys - - emptyDir: {} - name: gpg-keyring - - emptyDir: {} - name: tmp ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: dex-server - app.kubernetes.io/name: argocd-dex-server - app.kubernetes.io/part-of: argocd - name: argocd-dex-server -spec: - selector: - matchLabels: - app.kubernetes.io/name: argocd-dex-server - template: - metadata: - labels: - app.kubernetes.io/name: argocd-dex-server - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/part-of: argocd - topologyKey: kubernetes.io/hostname - weight: 5 - containers: - - command: - - /shared/argocd-dex - - rundex - env: - - name: ARGOCD_DEX_SERVER_DISABLE_TLS - valueFrom: - configMapKeyRef: - key: dexserver.disable.tls - name: argocd-cmd-params-cm - optional: true - image: ghcr.io/dexidp/dex:v2.35.3 - imagePullPolicy: Always - name: dex - ports: - - containerPort: 5556 - - containerPort: 5557 - - containerPort: 5558 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /shared - name: static-files - - mountPath: /tmp - name: dexconfig - - mountPath: /tls - name: argocd-dex-server-tls - 
initContainers: - - command: - - cp - - -n - - /usr/local/bin/argocd - - /shared/argocd-dex - image: quay.io/argoproj/argocd:v2.5.3 - imagePullPolicy: Always - name: copyutil - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /shared - name: static-files - - mountPath: /tmp - name: dexconfig - serviceAccountName: argocd-dex-server - volumes: - - emptyDir: {} - name: static-files - - emptyDir: {} - name: dexconfig - - name: argocd-dex-server-tls - secret: - items: - - key: tls.crt - path: tls.crt - - key: tls.key - path: tls.key - - key: ca.crt - path: ca.crt - optional: true - secretName: argocd-dex-server-tls ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: argocd-notifications-controller -spec: - selector: - matchLabels: - app.kubernetes.io/name: argocd-notifications-controller - strategy: - type: Recreate - template: - metadata: - labels: - app.kubernetes.io/name: argocd-notifications-controller - spec: - containers: - - command: - - argocd-notifications - image: quay.io/argoproj/argocd:v2.5.3 - imagePullPolicy: Always - livenessProbe: - tcpSocket: - port: 9001 - name: argocd-notifications-controller - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - volumeMounts: - - mountPath: /app/config/tls - name: tls-certs - - mountPath: /app/config/reposerver/tls - name: argocd-repo-server-tls - workingDir: /app - securityContext: - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: argocd-notifications-controller - volumes: - - configMap: - name: argocd-tls-certs-cm - name: tls-certs - - name: argocd-repo-server-tls - secret: - items: - - key: tls.crt - path: tls.crt - - key: tls.key - path: tls.key - - key: ca.crt - path: ca.crt - optional: true - secretName: argocd-repo-server-tls ---- -apiVersion: 
apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: redis - app.kubernetes.io/name: argocd-redis - app.kubernetes.io/part-of: argocd - name: argocd-redis -spec: - selector: - matchLabels: - app.kubernetes.io/name: argocd-redis - template: - metadata: - labels: - app.kubernetes.io/name: argocd-redis - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/name: argocd-redis - topologyKey: kubernetes.io/hostname - weight: 100 - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/part-of: argocd - topologyKey: kubernetes.io/hostname - weight: 5 - containers: - - args: - - --save - - "" - - --appendonly - - "no" - image: redis:7.0.5-alpine - imagePullPolicy: Always - name: redis - ports: - - containerPort: 6379 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - securityContext: - runAsNonRoot: true - runAsUser: 999 - seccompProfile: - type: RuntimeDefault - serviceAccountName: argocd-redis ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: repo-server - app.kubernetes.io/name: argocd-repo-server - app.kubernetes.io/part-of: argocd - name: argocd-repo-server -spec: - selector: - matchLabels: - app.kubernetes.io/name: argocd-repo-server - template: - metadata: - labels: - app.kubernetes.io/name: argocd-repo-server - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/name: argocd-repo-server - topologyKey: kubernetes.io/hostname - weight: 100 - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/part-of: argocd - topologyKey: kubernetes.io/hostname - weight: 5 - automountServiceAccountToken: false - containers: - - command: - - sh - - -c - - entrypoint.sh argocd-repo-server --redis argocd-redis:6379 - env: - - name: 
ARGOCD_RECONCILIATION_TIMEOUT - valueFrom: - configMapKeyRef: - key: timeout.reconciliation - name: argocd-cm - optional: true - - name: ARGOCD_REPO_SERVER_LOGFORMAT - valueFrom: - configMapKeyRef: - key: reposerver.log.format - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_LOGLEVEL - valueFrom: - configMapKeyRef: - key: reposerver.log.level - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_PARALLELISM_LIMIT - valueFrom: - configMapKeyRef: - key: reposerver.parallelism.limit - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_DISABLE_TLS - valueFrom: - configMapKeyRef: - key: reposerver.disable.tls - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_TLS_MIN_VERSION - valueFrom: - configMapKeyRef: - key: reposerver.tls.minversion - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_TLS_MAX_VERSION - valueFrom: - configMapKeyRef: - key: reposerver.tls.maxversion - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_TLS_CIPHERS - valueFrom: - configMapKeyRef: - key: reposerver.tls.ciphers - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: reposerver.repo.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: REDIS_SERVER - valueFrom: - configMapKeyRef: - key: redis.server - name: argocd-cmd-params-cm - optional: true - - name: REDIS_COMPRESSION - valueFrom: - configMapKeyRef: - key: redis.compression - name: argocd-cmd-params-cm - optional: true - - name: REDISDB - valueFrom: - configMapKeyRef: - key: redis.db - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_DEFAULT_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: reposerver.default.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_OTLP_ADDRESS - valueFrom: - configMapKeyRef: - key: otlp.address - name: argocd-cmd-params-cm - optional: true - - name: 
ARGOCD_REPO_SERVER_MAX_COMBINED_DIRECTORY_MANIFESTS_SIZE - valueFrom: - configMapKeyRef: - key: reposerver.max.combined.directory.manifests.size - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_PLUGIN_TAR_EXCLUSIONS - valueFrom: - configMapKeyRef: - key: reposerver.plugin.tar.exclusions - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_ALLOW_OUT_OF_BOUNDS_SYMLINKS - valueFrom: - configMapKeyRef: - key: reposerver.allow.oob.symlinks - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_TAR_SIZE - valueFrom: - configMapKeyRef: - key: reposerver.streamed.manifest.max.tar.size - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_REPO_SERVER_STREAMED_MANIFEST_MAX_EXTRACTED_SIZE - valueFrom: - configMapKeyRef: - key: reposerver.streamed.manifest.max.extracted.size - name: argocd-cmd-params-cm - optional: true - - name: HELM_CACHE_HOME - value: /helm-working-dir - - name: HELM_CONFIG_HOME - value: /helm-working-dir - - name: HELM_DATA_HOME - value: /helm-working-dir - image: quay.io/argoproj/argocd:v2.5.3 - imagePullPolicy: Always - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz?full=true - port: 8084 - initialDelaySeconds: 30 - periodSeconds: 5 - name: argocd-repo-server - ports: - - containerPort: 8081 - - containerPort: 8084 - readinessProbe: - httpGet: - path: /healthz - port: 8084 - initialDelaySeconds: 5 - periodSeconds: 10 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /app/config/ssh - name: ssh-known-hosts - - mountPath: /app/config/tls - name: tls-certs - - mountPath: /app/config/gpg/source - name: gpg-keys - - mountPath: /app/config/gpg/keys - name: gpg-keyring - - mountPath: /app/config/reposerver/tls - name: argocd-repo-server-tls - - mountPath: /tmp - name: tmp - - mountPath: 
/helm-working-dir - name: helm-working-dir - - mountPath: /home/argocd/cmp-server/plugins - name: plugins - initContainers: - - command: - - cp - - -n - - /usr/local/bin/argocd - - /var/run/argocd/argocd-cmp-server - image: quay.io/argoproj/argocd:v2.5.3 - name: copyutil - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /var/run/argocd - name: var-files - serviceAccountName: argocd-repo-server - volumes: - - configMap: - name: argocd-ssh-known-hosts-cm - name: ssh-known-hosts - - configMap: - name: argocd-tls-certs-cm - name: tls-certs - - configMap: - name: argocd-gpg-keys-cm - name: gpg-keys - - emptyDir: {} - name: gpg-keyring - - emptyDir: {} - name: tmp - - emptyDir: {} - name: helm-working-dir - - name: argocd-repo-server-tls - secret: - items: - - key: tls.crt - path: tls.crt - - key: tls.key - path: tls.key - - key: ca.crt - path: ca.crt - optional: true - secretName: argocd-repo-server-tls - - emptyDir: {} - name: var-files - - emptyDir: {} - name: plugins ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: server - app.kubernetes.io/name: argocd-server - app.kubernetes.io/part-of: argocd - name: argocd-server -spec: - selector: - matchLabels: - app.kubernetes.io/name: argocd-server - template: - metadata: - labels: - app.kubernetes.io/name: argocd-server - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/name: argocd-server - topologyKey: kubernetes.io/hostname - weight: 100 - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/part-of: argocd - topologyKey: kubernetes.io/hostname - weight: 5 - containers: - - command: - - argocd-server - - --insecure - env: - - name: ARGOCD_SERVER_INSECURE - valueFrom: - configMapKeyRef: - 
key: server.insecure - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_BASEHREF - value: "/argocd/" - - name: ARGOCD_SERVER_ROOTPATH - valueFrom: - configMapKeyRef: - key: server.rootpath - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_LOGFORMAT - valueFrom: - configMapKeyRef: - key: server.log.format - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_LOG_LEVEL - valueFrom: - configMapKeyRef: - key: server.log.level - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_REPO_SERVER - valueFrom: - configMapKeyRef: - key: repo.server - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_DEX_SERVER - valueFrom: - configMapKeyRef: - key: server.dex.server - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_DISABLE_AUTH - valueFrom: - configMapKeyRef: - key: server.disable.auth - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_ENABLE_GZIP - valueFrom: - configMapKeyRef: - key: server.enable.gzip - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_REPO_SERVER_TIMEOUT_SECONDS - valueFrom: - configMapKeyRef: - key: server.repo.server.timeout.seconds - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_X_FRAME_OPTIONS - valueFrom: - configMapKeyRef: - key: server.x.frame.options - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_CONTENT_SECURITY_POLICY - valueFrom: - configMapKeyRef: - key: server.content.security.policy - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_REPO_SERVER_PLAINTEXT - valueFrom: - configMapKeyRef: - key: server.repo.server.plaintext - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_REPO_SERVER_STRICT_TLS - valueFrom: - configMapKeyRef: - key: server.repo.server.strict.tls - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_DEX_SERVER_PLAINTEXT - valueFrom: - configMapKeyRef: - key: server.dex.server.plaintext - 
name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_DEX_SERVER_STRICT_TLS - valueFrom: - configMapKeyRef: - key: server.dex.server.strict.tls - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_TLS_MIN_VERSION - valueFrom: - configMapKeyRef: - key: server.tls.minversion - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_TLS_MAX_VERSION - valueFrom: - configMapKeyRef: - key: server.tls.maxversion - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_TLS_CIPHERS - valueFrom: - configMapKeyRef: - key: server.tls.ciphers - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_CONNECTION_STATUS_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: server.connection.status.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_OIDC_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: server.oidc.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_LOGIN_ATTEMPTS_EXPIRATION - valueFrom: - configMapKeyRef: - key: server.login.attempts.expiration - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_STATIC_ASSETS - valueFrom: - configMapKeyRef: - key: server.staticassets - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APP_STATE_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: server.app.state.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: REDIS_SERVER - valueFrom: - configMapKeyRef: - key: redis.server - name: argocd-cmd-params-cm - optional: true - - name: REDIS_COMPRESSION - valueFrom: - configMapKeyRef: - key: redis.compression - name: argocd-cmd-params-cm - optional: true - - name: REDISDB - valueFrom: - configMapKeyRef: - key: redis.db - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_DEFAULT_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: server.default.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_MAX_COOKIE_NUMBER - 
valueFrom: - configMapKeyRef: - key: server.http.cookie.maxnumber - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_SERVER_OTLP_ADDRESS - valueFrom: - configMapKeyRef: - key: otlp.address - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_NAMESPACES - valueFrom: - configMapKeyRef: - key: application.namespaces - name: argocd-cmd-params-cm - optional: true - image: quay.io/argoproj/argocd:v2.5.3 - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz?full=true - port: 8080 - initialDelaySeconds: 3 - periodSeconds: 30 - name: argocd-server - ports: - - containerPort: 8080 - - containerPort: 8083 - readinessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 3 - periodSeconds: 30 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /app/config/ssh - name: ssh-known-hosts - - mountPath: /app/config/tls - name: tls-certs - - mountPath: /app/config/server/tls - name: argocd-repo-server-tls - - mountPath: /app/config/dex/tls - name: argocd-dex-server-tls - - mountPath: /home/argocd - name: plugins-home - - mountPath: /tmp - name: tmp - serviceAccountName: argocd-server - volumes: - - emptyDir: {} - name: plugins-home - - emptyDir: {} - name: tmp - - configMap: - name: argocd-ssh-known-hosts-cm - name: ssh-known-hosts - - configMap: - name: argocd-tls-certs-cm - name: tls-certs - - name: argocd-repo-server-tls - secret: - items: - - key: tls.crt - path: tls.crt - - key: tls.key - path: tls.key - - key: ca.crt - path: ca.crt - optional: true - secretName: argocd-repo-server-tls - - name: argocd-dex-server-tls - secret: - items: - - key: tls.crt - path: tls.crt - - key: ca.crt - path: ca.crt - optional: true - secretName: argocd-dex-server-tls ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - app.kubernetes.io/component: 
application-controller - app.kubernetes.io/name: argocd-application-controller - app.kubernetes.io/part-of: argocd - name: argocd-application-controller -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: argocd-application-controller - serviceName: argocd-application-controller - template: - metadata: - labels: - app.kubernetes.io/name: argocd-application-controller - spec: - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/name: argocd-application-controller - topologyKey: kubernetes.io/hostname - weight: 100 - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/part-of: argocd - topologyKey: kubernetes.io/hostname - weight: 5 - containers: - - command: - - argocd-application-controller - env: - - name: ARGOCD_CONTROLLER_REPLICAS - value: "1" - - name: ARGOCD_RECONCILIATION_TIMEOUT - valueFrom: - configMapKeyRef: - key: timeout.reconciliation - name: argocd-cm - optional: true - - name: ARGOCD_HARD_RECONCILIATION_TIMEOUT - valueFrom: - configMapKeyRef: - key: timeout.hard.reconciliation - name: argocd-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER - valueFrom: - configMapKeyRef: - key: repo.server - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_TIMEOUT_SECONDS - valueFrom: - configMapKeyRef: - key: controller.repo.server.timeout.seconds - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_STATUS_PROCESSORS - valueFrom: - configMapKeyRef: - key: controller.status.processors - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_OPERATION_PROCESSORS - valueFrom: - configMapKeyRef: - key: controller.operation.processors - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_LOGFORMAT - valueFrom: - configMapKeyRef: - key: controller.log.format - name: 
argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_LOGLEVEL - valueFrom: - configMapKeyRef: - key: controller.log.level - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_METRICS_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: controller.metrics.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_SELF_HEAL_TIMEOUT_SECONDS - valueFrom: - configMapKeyRef: - key: controller.self.heal.timeout.seconds - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_PLAINTEXT - valueFrom: - configMapKeyRef: - key: controller.repo.server.plaintext - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_REPO_SERVER_STRICT_TLS - valueFrom: - configMapKeyRef: - key: controller.repo.server.strict.tls - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_PERSIST_RESOURCE_HEALTH - valueFrom: - configMapKeyRef: - key: controller.resource.health.persist - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APP_STATE_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: controller.app.state.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: REDIS_SERVER - valueFrom: - configMapKeyRef: - key: redis.server - name: argocd-cmd-params-cm - optional: true - - name: REDIS_COMPRESSION - valueFrom: - configMapKeyRef: - key: redis.compression - name: argocd-cmd-params-cm - optional: true - - name: REDISDB - valueFrom: - configMapKeyRef: - key: redis.db - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_DEFAULT_CACHE_EXPIRATION - valueFrom: - configMapKeyRef: - key: controller.default.cache.expiration - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_CONTROLLER_OTLP_ADDRESS - valueFrom: - configMapKeyRef: - key: otlp.address - name: argocd-cmd-params-cm - optional: true - - name: ARGOCD_APPLICATION_NAMESPACES - 
valueFrom: - configMapKeyRef: - key: application.namespaces - name: argocd-cmd-params-cm - optional: true - image: quay.io/argoproj/argocd:v2.5.3 - imagePullPolicy: Always - name: argocd-application-controller - ports: - - containerPort: 8082 - readinessProbe: - httpGet: - path: /healthz - port: 8082 - initialDelaySeconds: 5 - periodSeconds: 10 - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /app/config/controller/tls - name: argocd-repo-server-tls - - mountPath: /home/argocd - name: argocd-home - workingDir: /home/argocd - serviceAccountName: argocd-application-controller - volumes: - - emptyDir: {} - name: argocd-home - - name: argocd-repo-server-tls - secret: - items: - - key: tls.crt - path: tls.crt - - key: tls.key - path: tls.key - - key: ca.crt - path: ca.crt - optional: true - secretName: argocd-repo-server-tls ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: argocd-application-controller-network-policy -spec: - ingress: - - from: - - namespaceSelector: {} - ports: - - port: 8082 - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-application-controller - policyTypes: - - Ingress ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: argocd-applicationset-controller-network-policy -spec: - ingress: - - from: - - namespaceSelector: {} - ports: - - port: 7000 - protocol: TCP - - port: 8080 - protocol: TCP - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-applicationset-controller - policyTypes: - - Ingress ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: argocd-dex-server-network-policy -spec: - ingress: - - from: - - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-server - ports: - - port: 5556 - protocol: TCP - - port: 5557 - protocol: TCP - - from: - - namespaceSelector: {} - 
ports: - - port: 5558 - protocol: TCP - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-dex-server - policyTypes: - - Ingress ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: argocd-notifications-controller-network-policy -spec: - ingress: - - from: - - namespaceSelector: {} - ports: - - port: 9001 - protocol: TCP - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-notifications-controller - policyTypes: - - Ingress ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: argocd-redis-network-policy -spec: - egress: - - ports: - - port: 53 - protocol: UDP - - port: 53 - protocol: TCP - ingress: - - from: - - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-server - - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-repo-server - - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-application-controller - ports: - - port: 6379 - protocol: TCP - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-redis - policyTypes: - - Ingress - - Egress ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: argocd-repo-server-network-policy -spec: - ingress: - - from: - - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-server - - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-application-controller - - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-notifications-controller - ports: - - port: 8081 - protocol: TCP - - from: - - namespaceSelector: {} - ports: - - port: 8084 - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-repo-server - policyTypes: - - Ingress ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: argocd-server-network-policy -spec: - ingress: - - {} - podSelector: - matchLabels: - app.kubernetes.io/name: argocd-server - policyTypes: - - Ingress \ No newline at end of file diff --git a/kube/services/argocd/values.yaml 
b/kube/services/argocd/values.yaml new file mode 100644 index 000000000..4d799c055 --- /dev/null +++ b/kube/services/argocd/values.yaml @@ -0,0 +1,2894 @@ +## Argo CD configuration +## Ref: https://github.com/argoproj/argo-cd +## + +# -- Provide a name in place of `argocd` +nameOverride: argocd +# -- String to fully override `"argo-cd.fullname"` +fullnameOverride: "" +# -- Override the Kubernetes version, which is used to evaluate certain manifests +kubeVersionOverride: "" +# Override APIVersions +# If you want to template helm charts but cannot access k8s API server +# you can set api versions here +apiVersionOverrides: + # -- String to override apiVersion of cert-manager resources rendered by this helm chart + certmanager: "" # cert-manager.io/v1 + # -- String to override apiVersion of GKE resources rendered by this helm chart + cloudgoogle: "" # cloud.google.com/v1 + # -- String to override apiVersion of autoscaling rendered by this helm chart + autoscaling: "" # autoscaling/v2 + +# -- Create clusterroles that extend existing clusterroles to interact with argo-cd crds +## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles +createAggregateRoles: false + +openshift: + # -- enables using arbitrary uid for argo repo server + enabled: false + +## Custom resource configuration +crds: + # -- Install and upgrade CRDs + install: true + # -- Keep CRDs on chart uninstall + keep: true + # -- Annotations to be added to all CRDs + annotations: {} + +## Globally shared configuration +global: + # -- Common labels for the all resources + additionalLabels: {} + # app: argo-cd + + # -- Number of old deployment ReplicaSets to retain. The rest will be garbage collected. 
+ revisionHistoryLimit: 3 + + # Default image used by all components + image: + # -- If defined, a repository applied to all Argo CD deployments + repository: quay.io/argoproj/argocd + # -- Overrides the global Argo CD image tag whose default is the chart appVersion + tag: "" + # -- If defined, a imagePullPolicy applied to all Argo CD deployments + imagePullPolicy: IfNotPresent + + # -- Secrets with credentials to pull images from a private registry + imagePullSecrets: [] + + # Default logging options used by all components + logging: + # -- Set the global logging format. Either: `text` or `json` + format: text + # -- Set the global logging level. One of: `debug`, `info`, `warn` or `error` + level: info + + # -- Annotations for the all deployed Statefulsets + statefulsetAnnotations: {} + + # -- Annotations for the all deployed Deployments + deploymentAnnotations: {} + + # -- Annotations for the all deployed pods + podAnnotations: {} + + # -- Labels for the all deployed pods + podLabels: {} + + # -- Toggle and define pod-level security context. + # @default -- `{}` (See [values.yaml]) + securityContext: {} + # runAsUser: 999 + # runAsGroup: 999 + # fsGroup: 999 + + # -- Mapping between IP and hostnames that will be injected as entries in the pod's hosts files + hostAliases: [] + # - ip: 10.20.30.40 + # hostnames: + # - git.myhostname + + networkPolicy: + # -- Create NetworkPolicy objects for all components + create: false + # -- Default deny all ingress traffic + defaultDenyIngress: false + +## Argo Configs +configs: + # General Argo CD configuration + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cm.yaml + cm: + # -- Create the argocd-cm configmap for [declarative setup] + create: true + + # -- Annotations to be added to argocd-cm configmap + annotations: {} + + # -- Argo CD's externally facing base URL (optional). 
Required when configuring SSO + url: "" + + # -- The name of tracking label used by Argo CD for resource pruning + # @default -- Defaults to app.kubernetes.io/instance + application.instanceLabelKey: argocd.argoproj.io/instance + + # -- Enable logs RBAC enforcement + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/upgrading/2.3-2.4/#enable-logs-rbac-enforcement + server.rbac.log.enforce.enable: false + + # -- Enable exec feature in Argo UI + ## Ref: https://argo-cd.readthedocs.io/en/latest/operator-manual/rbac/#exec-resource + exec.enabled: false + + # -- Enable local admin user + ## Ref: https://argo-cd.readthedocs.io/en/latest/faq/#how-to-disable-admin-user + admin.enabled: true + + # -- Timeout to discover if a new manifests version got published to the repository + timeout.reconciliation: 180s + + # -- Timeout to refresh application data as well as target manifests cache + timeout.hard.reconciliation: 0s + + # Dex configuration + # dex.config: | + # connectors: + # # GitHub example + # - type: github + # id: github + # name: GitHub + # config: + # clientID: aabbccddeeff00112233 + # clientSecret: $dex.github.clientSecret # Alternatively $:dex.github.clientSecret + # orgs: + # - name: your-github-org + + # OIDC configuration as an alternative to dex (optional). + # oidc.config: | + # name: AzureAD + # issuer: https://login.microsoftonline.com/TENANT_ID/v2.0 + # clientID: CLIENT_ID + # clientSecret: $oidc.azuread.clientSecret + # rootCA: | + # -----BEGIN CERTIFICATE----- + # ... encoded certificate data here ... 
+ # -----END CERTIFICATE----- + # requestedIDTokenClaims: + # groups: + # essential: true + # requestedScopes: + # - openid + # - profile + # - email + + # Argo CD configuration parameters + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/argocd-cmd-params-cm.yaml + params: + # -- Annotations to be added to the argocd-cmd-params-cm ConfigMap + annotations: {} + + ## Generic parameters + # -- Open-Telemetry collector address: (e.g. "otel-collector:4317") + otlp.address: '' + + ## Controller Properties + # -- Number of application status processors + controller.status.processors: 20 + # -- Number of application operation processors + controller.operation.processors: 10 + # -- Specifies timeout between application self heal attempts + controller.self.heal.timeout.seconds: 5 + # -- Repo server RPC call timeout seconds. + controller.repo.server.timeout.seconds: 60 + + ## Server properties + # -- Run server without TLS + server.insecure: false + # -- Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from / + server.basehref: "/argocd/" + # -- Used if Argo CD is running behind reverse proxy under subpath different from / + server.rootpath: "" + # -- Directory path that contains additional static assets + server.staticassets: /shared/app + # -- Disable Argo CD RBAC for user authentication + server.disable.auth: false + # -- Enable GZIP compression + server.enable.gzip: false + # -- Set X-Frame-Options header in HTTP responses to value. To disable, set to "". + server.x.frame.options: sameorigin + + ## Repo-server properties + # -- Limit on number of concurrent manifests generate requests. Any value less the 1 means no limit. + reposerver.parallelism.limit: 0 + + # Argo CD RBAC policy configuration + ## Ref: https://github.com/argoproj/argo-cd/blob/master/docs/operator-manual/rbac.md + rbac: + # -- Create the argocd-rbac-cm configmap with ([Argo CD RBAC policy]) definitions. 
+ # If false, it is expected the configmap will be created by something else. + # Argo CD will not work if there is no configmap created with the name above. + create: true + + # -- Annotations to be added to argocd-rbac-cm configmap + annotations: {} + + # -- The name of the default role which Argo CD will falls back to, when authorizing API requests (optional). + # If omitted or empty, users may be still be able to login, but will see no apps, projects, etc... + policy.default: '' + + # -- File containing user-defined policies and role definitions. + # @default -- `''` (See [values.yaml]) + policy.csv: '' + # Policy rules are in the form: + # p, subject, resource, action, object, effect + # Role definitions and bindings are in the form: + # g, subject, inherited-subject + # policy.csv | + # p, role:org-admin, applications, *, */*, allow + # p, role:org-admin, clusters, get, *, allow + # p, role:org-admin, repositories, *, *, allow + # p, role:org-admin, logs, get, *, allow + # p, role:org-admin, exec, create, */*, allow + # g, your-github-org:your-team, role:org-admin + + # -- OIDC scopes to examine during rbac enforcement (in addition to `sub` scope). + # The scope value can be a string, or a list of strings. + scopes: "[groups]" + + # GnuPG public keys for commit verification + ## Ref: https://argo-cd.readthedocs.io/en/stable/user-guide/gpg-verification/ + gpg: + # -- Annotations to be added to argocd-gpg-keys-cm configmap + annotations: {} + + # -- [GnuPG] public keys to add to the keyring + # @default -- `{}` (See [values.yaml]) + ## Note: Public keys should be exported with `gpg --export --armor ` + keys: {} + # 4AEE18F83AFDEB23: | + # -----BEGIN PGP PUBLIC KEY BLOCK----- + # ... 
+ # -----END PGP PUBLIC KEY BLOCK----- + + + # -- Provide one or multiple [external cluster credentials] + # @default -- `[]` (See [values.yaml]) + ## Ref: + ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#clusters + ## - https://argo-cd.readthedocs.io/en/stable/operator-manual/security/#external-cluster-credentials + clusterCredentials: [] + # - name: mycluster + # server: https://mycluster.com + # labels: {} + # annotations: {} + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + # - name: mycluster2 + # server: https://mycluster2.com + # labels: {} + # annotations: {} + # namespaces: namespace1,namespace2 + # clusterResources: true + # config: + # bearerToken: "" + # tlsClientConfig: + # insecure: false + # caData: "" + + # -- Known Hosts configmap annotations + knownHostsAnnotations: {} + knownHosts: + data: + # -- Known Hosts + # @default -- See [values.yaml] + ssh_known_hosts: | + bitbucket.org ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAubiN81eDcafrgMeLzaFPsw2kNvEcqTKl/VqLat/MaB33pZy0y3rJZtnqwR2qOOvbwKZYKiEO1O6VqNEBxKvJJelCq0dTXWT5pbO2gDXC6h6QDXCaHo6pOHGPUy+YBaGQRGuSusMEASYiWunYN0vCAI8QaXnWMXNMdFP3jHAJH0eDsoiGnLPBlBp4TNm6rYI74nMzgz3B9IikW4WVK+dc8KZJZWYjAuORU3jc1c/NPskD2ASinf8v3xnfXeukU0sJ5N6m5E8VLjObPEO+mN2t/FZTMZLiFqPWc/ALSqnMnnhwrNi2rbfg/rd/IpL8Le3pSBne8+seeFVBoGqzHM9yXw== + github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg= + github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl + github.com ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== + gitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY= + gitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf + gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9 + ssh.dev.azure.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + vs-ssh.visualstudio.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7Hr1oTWqNqOlzGJOfGJ4NakVyIzf1rXYd4d7wo6jBlkLvCA4odBlL0mDUyZ0/QUfTTqeu+tm22gOsv+VrVTMk6vwRU75gY/y9ut5Mb3bR5BV58dKXyq9A9UeB5Cakehn5Zgm6x1mKoVyf+FFn26iYqXJRgzIZZcZ5V6hrE0Qg39kZm4az48o0AUbf6Sp4SLdvnuMa2sVNwHBboS7EJkm57XQPVU3/QpyNLHbWDdzwtrlS+ez30S3AdYhLKEOxAG8weOnyrtLJAUen9mTkol8oII1edf7mWWbWVf0nBmly21+nZcmCTISQBtdcyPaEno7fFQMDD26/s0lfKob4Kw8H + # -- TLS certificate configmap annotations + tlsCertsAnnotations: {} + # -- TLS certificate + # @default -- See [values.yaml] + tlsCerts: + {} + # data: + 
# argocd.example.com: | + # -----BEGIN CERTIFICATE----- + # MIIF1zCCA7+gAwIBAgIUQdTcSHY2Sxd3Tq/v1eIEZPCNbOowDQYJKoZIhvcNAQEL + # BQAwezELMAkGA1UEBhMCREUxFTATBgNVBAgMDExvd2VyIFNheG9ueTEQMA4GA1UE + # BwwHSGFub3ZlcjEVMBMGA1UECgwMVGVzdGluZyBDb3JwMRIwEAYDVQQLDAlUZXN0 + # c3VpdGUxGDAWBgNVBAMMD2Jhci5leGFtcGxlLmNvbTAeFw0xOTA3MDgxMzU2MTda + # Fw0yMDA3MDcxMzU2MTdaMHsxCzAJBgNVBAYTAkRFMRUwEwYDVQQIDAxMb3dlciBT + # YXhvbnkxEDAOBgNVBAcMB0hhbm92ZXIxFTATBgNVBAoMDFRlc3RpbmcgQ29ycDES + # MBAGA1UECwwJVGVzdHN1aXRlMRgwFgYDVQQDDA9iYXIuZXhhbXBsZS5jb20wggIi + # MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCv4mHMdVUcafmaSHVpUM0zZWp5 + # NFXfboxA4inuOkE8kZlbGSe7wiG9WqLirdr39Ts+WSAFA6oANvbzlu3JrEQ2CHPc + # CNQm6diPREFwcDPFCe/eMawbwkQAPVSHPts0UoRxnpZox5pn69ghncBR+jtvx+/u + # P6HdwW0qqTvfJnfAF1hBJ4oIk2AXiip5kkIznsAh9W6WRy6nTVCeetmIepDOGe0G + # ZJIRn/OfSz7NzKylfDCat2z3EAutyeT/5oXZoWOmGg/8T7pn/pR588GoYYKRQnp+ + # YilqCPFX+az09EqqK/iHXnkdZ/Z2fCuU+9M/Zhrnlwlygl3RuVBI6xhm/ZsXtL2E + # Gxa61lNy6pyx5+hSxHEFEJshXLtioRd702VdLKxEOuYSXKeJDs1x9o6cJ75S6hko + # Ml1L4zCU+xEsMcvb1iQ2n7PZdacqhkFRUVVVmJ56th8aYyX7KNX6M9CD+kMpNm6J + # kKC1li/Iy+RI138bAvaFplajMF551kt44dSvIoJIbTr1LigudzWPqk31QaZXV/4u + # kD1n4p/XMc9HYU/was/CmQBFqmIZedTLTtK7clkuFN6wbwzdo1wmUNgnySQuMacO + # gxhHxxzRWxd24uLyk9Px+9U3BfVPaRLiOPaPoC58lyVOykjSgfpgbus7JS69fCq7 + # bEH4Jatp/10zkco+UQIDAQABo1MwUTAdBgNVHQ4EFgQUjXH6PHi92y4C4hQpey86 + # r6+x1ewwHwYDVR0jBBgwFoAUjXH6PHi92y4C4hQpey86r6+x1ewwDwYDVR0TAQH/ + # BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAFE4SdKsX9UsLy+Z0xuHSxhTd0jfn + # Iih5mtzb8CDNO5oTw4z0aMeAvpsUvjJ/XjgxnkiRACXh7K9hsG2r+ageRWGevyvx + # CaRXFbherV1kTnZw4Y9/pgZTYVWs9jlqFOppz5sStkfjsDQ5lmPJGDii/StENAz2 + # XmtiPOgfG9Upb0GAJBCuKnrU9bIcT4L20gd2F4Y14ccyjlf8UiUi192IX6yM9OjT + # +TuXwZgqnTOq6piVgr+FTSa24qSvaXb5z/mJDLlk23npecTouLg83TNSn3R6fYQr + # d/Y9eXuUJ8U7/qTh2Ulz071AO9KzPOmleYPTx4Xty4xAtWi1QE5NHW9/Ajlv5OtO + # OnMNWIs7ssDJBsB7VFC8hcwf79jz7kC0xmQqDfw51Xhhk04kla+v+HZcFW2AO9so + # 6ZdVHHQnIbJa7yQJKZ+hK49IOoBR6JgdB5kymoplLLiuqZSYTcwSBZ72FYTm3iAr + # 
jzvt1hxpxVDmXvRnkhRrIRhK4QgJL0jRmirBjDY+PYYd7bdRIjN7WNZLFsgplnS8 + # 9w6CwG32pRlm0c8kkiQ7FXA6BYCqOsDI8f1VGQv331OpR2Ck+FTv+L7DAmg6l37W + # +LB9LGh4OAp68ImTjqf6ioGKG0RBSznwME+r4nXtT1S/qLR6ASWUS4ViWRhbRlNK + # XWyb96wrUlv+E8I= + # -----END CERTIFICATE----- + + # -- Repository credentials to be used as Templates for other repos + ## Creates a secret for each key/value specified below to create repository credentials + credentialTemplates: {} + # github-enterprise-creds-1: + # url: https://github.com/argoproj + # githubAppID: 1 + # githubAppInstallationID: 2 + # githubAppEnterpriseBaseUrl: https://ghe.example.com/api/v3 + # githubAppPrivateKey: | + # -----BEGIN OPENSSH PRIVATE KEY----- + # ... + # -----END OPENSSH PRIVATE KEY----- + # https-creds: + # url: https://github.com/argoproj + # password: my-password + # username: my-username + # ssh-creds: + # url: git@github.com:argoproj-labs + # sshPrivateKey: | + # -----BEGIN OPENSSH PRIVATE KEY----- + # ... + # -----END OPENSSH PRIVATE KEY----- + + # -- Annotations to be added to `configs.credentialTemplates` Secret + credentialTemplatesAnnotations: {} + + # -- Repositories list to be used by applications + ## Creates a secret for each key/value specified below to create repositories + ## Note: the last example in the list would use a repository credential template, configured under "configs.repositoryCredentials". 
+ repositories: {} + # istio-helm-repo: + # url: https://storage.googleapis.com/istio-prerelease/daily-build/master-latest-daily/charts + # name: istio.io + # type: helm + # private-helm-repo: + # url: https://my-private-chart-repo.internal + # name: private-repo + # type: helm + # password: my-password + # username: my-username + # private-repo: + # url: https://github.com/argoproj/private-repo + + # -- Annotations to be added to `configs.repositories` Secret + repositoriesAnnotations: {} + + # Argo CD sensitive data + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets + secret: + # -- Create the argocd-secret + createSecret: true + # -- Annotations to be added to argocd-secret + annotations: {} + + # -- Shared secret for authenticating GitHub webhook events + githubSecret: "" + # -- Shared secret for authenticating GitLab webhook events + gitlabSecret: "" + # -- Shared secret for authenticating BitbucketServer webhook events + bitbucketServerSecret: "" + # -- UUID for authenticating Bitbucket webhook events + bitbucketUUID: "" + # -- Shared secret for authenticating Gogs webhook events + gogsSecret: "" + + # -- add additional secrets to be added to argocd-secret + ## Custom secrets. Useful for injecting SSO secrets into environment variables. + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/user-management/#sensitive-data-and-sso-client-secrets + ## Note that all values must be non-empty. + extra: + {} + # LDAP_PASSWORD: "mypassword" + + # -- Argo TLS Data + # DEPRECATED - Use server.certificate or server.certificateSecret + # argocdServerTlsConfig: + # key: '' + # crt: '' + + # -- Bcrypt hashed admin password + ## Argo expects the password in the secret to be bcrypt hashed. You can create this hash with + ## `htpasswd -nbBC 10 "" $ARGO_PWD | tr -d ':\n' | sed 's/$2y/$2a/'` + argocdServerAdminPassword: "" + # -- Admin password modification time. Eg. 
`"2006-01-02T15:04:05Z"` + # @default -- `""` (defaults to current time) + argocdServerAdminPasswordMtime: "" + + # -- Define custom [CSS styles] for your argo instance. + # This setting will automatically mount the provided CSS and reference it in the argo configuration. + # @default -- `""` (See [values.yaml]) + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/custom-styles/ + styles: "" + # styles: | + # .nav-bar { + # background: linear-gradient(to bottom, #999, #777, #333, #222, #111); + # } + +# -- Array of extra K8s manifests to deploy +extraObjects: [] + # - apiVersion: secrets-store.csi.x-k8s.io/v1 + # kind: SecretProviderClass + # metadata: + # name: argocd-secrets-store + # spec: + # provider: aws + # parameters: + # objects: | + # - objectName: "argocd" + # objectType: "secretsmanager" + # jmesPath: + # - path: "client_id" + # objectAlias: "client_id" + # - path: "client_secret" + # objectAlias: "client_secret" + # secretObjects: + # - data: + # - key: client_id + # objectName: client_id + # - key: client_secret + # objectName: client_secret + # secretName: argocd-secrets-store + # type: Opaque + # labels: + # app.kubernetes.io/part-of: argocd + +## Application controller +controller: + # -- Application controller name string + name: application-controller + + # -- The number of application controller pods to run. + # Additional replicas will cause sharding of managed clusters across number of replicas. 
+ replicas: 1 + + ## Application controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the application controller + enabled: false + # -- Labels to be added to application controller pdb + labels: {} + # -- Annotations to be added to application controller pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `controller.pdb.minAvailable` + maxUnavailable: "" + + ## Application controller image + image: + # -- Repository to use for the application controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the application controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the application controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- DEPRECATED - Application controller commandline flags + args: {} + # DEPRECATED - Use configs.params to override + # # -- define the application controller `--status-processors` + # statusProcessors: "20" + # # -- define the application controller `--operation-processors` + # operationProcessors: "10" + # # -- define the application controller `--app-hard-resync` + # appHardResyncPeriod: "0" + # # -- define the application controller `--app-resync` + # appResyncPeriod: "180" + # # -- define the application controller `--self-heal-timeout-seconds` + # selfHealTimeout: "5" + # # -- define the application controller `--repo-server-timeout-seconds` + # repoServerTimeoutSeconds: 
"60" + + # -- Additional command line arguments to pass to application controller + extraArgs: [] + + # -- Environment variables to pass to application controller + env: [] + + # -- envFrom to pass to application controller + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the application controller pod + extraContainers: [] + + # -- Init containers to add to the application controller pod + ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin + ## you could use this (and the same in the server pod) to provide such executable + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins + initContainers: [] + # - name: download-tools + # image: alpine:3 + # command: [sh, -c] + # args: + # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip && + # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/ + # volumeMounts: + # - mountPath: /custom-tools + # name: custom-tools + + # -- Additional volumeMounts to the application controller main container + volumeMounts: [] + # - mountPath: /usr/local/bin/kubelogin + # name: custom-tools + # subPath: kubelogin + + # -- Additional volumes to the application controller pod + volumes: [] + # - name: custom-tools + # emptyDir: {} + + # -- Annotations for the application controller StatefulSet + statefulsetAnnotations: {} + + # -- Annotations to be added to application controller pods + podAnnotations: {} + + # -- Labels to be added to application controller pods + podLabels: {} + + # -- Resource limits and requests for the application controller pods + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 250m + # memory: 256Mi + + # -- Application controller container-level security context + # @default -- See 
[values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Application controller listening port + containerPort: 8082 + + # Rediness probe for application controller + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + # -- [Node selector] + nodeSelector: {} + + # -- [Tolerations] for use with node taints + tolerations: [] + + # -- Assign custom [affinity] rules to the deployment + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to the application controller + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Priority class for the application controller pods + priorityClassName: "" + + serviceAccount: + # -- Create a service account for the application controller + create: true + # -- Service account name + name: argocd-application-controller + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + 
automountServiceAccountToken: true + + ## Application controller metrics configuration + metrics: + # -- Deploy metrics service + enabled: false + applicationLabels: + # -- Enables additional labels in argocd_app_labels metric + enabled: false + # -- Additional labels + labels: [] + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8082 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + rules: + # -- Deploy a PrometheusRule for the application controller + enabled: false + # -- PrometheusRule.Spec for the application controller + spec: [] + # - alert: ArgoAppMissing + # expr: | + # absent(argocd_app_info) == 1 + # for: 15m + # labels: + # severity: critical + # annotations: + # summary: "[Argo CD] No reported applications" + # description: > + # Argo CD has not reported any applications data for the past 15 minutes which + # means that it must be down or not functioning properly. This needs to be + # resolved for this cloud to continue to maintain state. 
+ # - alert: ArgoAppNotSynced + # expr: | + # argocd_app_info{sync_status!="Synced"} == 1 + # for: 12h + # labels: + # severity: warning + # annotations: + # summary: "[{{`{{$labels.name}}`}}] Application not synchronized" + # description: > + # The application [{{`{{$labels.name}}`}} has not been synchronized for over + # 12 hours which means that the state of this cloud has drifted away from the + # state inside Git. + # selector: + # prometheus: kube-prometheus + # namespace: monitoring + # additionalLabels: {} + # annotations: {} + + ## Enable if you would like to grant rights to Argo CD to deploy to the local Kubernetes cluster. + clusterAdminAccess: + # -- Enable RBAC for local cluster deployments + enabled: true + + ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource. + ## Defaults to off + clusterRoleRules: + # -- Enable custom rules for the application controller's ClusterRole resource + enabled: false + # -- List of custom rules for the application controller's ClusterRole resource + rules: [] + +## Dex +dex: + # -- Enable dex + enabled: false + # -- Dex name + name: dex-server + + # -- Additional command line arguments to pass to the Dex server + extraArgs: [] + + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- 
Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## Dex Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Dex server + enabled: false + # -- Labels to be added to Dex server pdb + labels: {} + # -- Annotations to be added to Dex server pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `dex.pdb.minAvailable` + maxUnavailable: "" + + ## Dex image + image: + # -- Dex image repository + repository: ghcr.io/dexidp/dex + # -- Dex image tag + tag: v2.35.3 + # -- Dex imagePullPolicy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # Argo CD init image that creates Dex config + initImage: + # -- Argo CD init image repository + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Argo CD init image tag + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Argo CD init image imagePullPolicy + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Environment variables to pass to the Dex server + env: [] + + # -- envFrom to pass to the Dex server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the dex pod + extraContainers: [] + + # -- Init containers to add to the dex 
pod + initContainers: [] + + # -- Additional volumeMounts to the dex main container + volumeMounts: [] + + # -- Additional volumes to the dex pod + volumes: [] + + # TLS certificate configuration via Secret + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-dex-server + ## Note: Issuing certificates via cert-manager in not supported right now because it's not possible to restart Dex automatically without extra controllers. + certificateSecret: + # -- Create argocd-dex-server-tls secret + enabled: false + # -- Labels to be added to argocd-dex-server-tls secret + labels: {} + # -- Annotations to be added to argocd-dex-server-tls secret + annotations: {} + # -- Certificate authority. Required for self-signed certificates. + ca: '' + # -- Certificate private key + key: '' + # -- Certificate data. Must contain SANs of Dex service (ie: argocd-dex-server, argocd-dex-server.argo-cd.svc) + crt: '' + + # -- Annotations to be added to the Dex server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to the Dex server pods + podAnnotations: {} + + # -- Labels to be added to the Dex server pods + podLabels: {} + + # -- Resource limits and requests for dex + resources: {} + # limits: + # cpu: 50m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # -- Dex container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for Dex server + ## Supported from Dex >= 2.28.0 + livenessProbe: + # -- Enable Kubernetes liveness probe for Dex >= 2.28.0 + enabled: false + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How 
often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + readinessProbe: + # -- Enable Kubernetes readiness probe for Dex >= 2.28.0 + enabled: false + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + serviceAccount: + # -- Create dex service account + create: true + # -- Dex service account name + name: argocd-dex-server + # -- Annotations applied to created service account + annotations: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Container port for HTTP access + containerPortHttp: 5556 + # -- Service port for HTTP access + servicePortHttp: 5556 + # -- Service port name for HTTP access + servicePortHttpName: http + # -- Container port for gRPC access + containerPortGrpc: 5557 + # -- Service port for gRPC access + servicePortGrpc: 5557 + # -- Service port name for gRPC access + servicePortGrpcName: grpc + # -- Container port for metrics access + containerPortMetrics: 5558 + # -- Service port for metrics access + servicePortMetrics: 5558 + + # -- [Node selector] + nodeSelector: {} + # -- [Tolerations] for use with node taints + tolerations: [] + # -- Assign custom [affinity] rules to the deployment + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to dex + ## Ref: 
https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Priority class for dex + priorityClassName: "" + +## Redis +redis: + # -- Enable redis + enabled: true + # -- Redis name + name: redis + + ## Redis Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Redis + enabled: false + # -- Labels to be added to Redis pdb + labels: {} + # -- Annotations to be added to Redis pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). 
+ ## Has higher precedence over `redis.pdb.minAvailable` + maxUnavailable: "" + + ## Redis image + image: + # -- Redis repository + repository: public.ecr.aws/docker/library/redis + # -- Redis tag + tag: 7.0.5-alpine + # -- Redis imagePullPolicy + imagePullPolicy: IfNotPresent + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Additional command line arguments to pass to redis-server + extraArgs: [] + # - --bind + # - "0.0.0.0" + + # -- Environment variables to pass to the Redis server + env: [] + + # -- envFrom to pass to the Redis server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the redis pod + extraContainers: [] + + # -- Init containers to add to the redis pod + initContainers: [] + + # -- Additional volumeMounts to the redis container + volumeMounts: [] + + # -- Additional volumes to the redis pod + volumes: [] + + # -- Annotations to be added to the Redis server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to the Redis server pods + podAnnotations: {} + + # -- Labels to be added to the Redis server pods + podLabels: {} + + # -- Resource limits and requests for redis + resources: {} + # limits: + # cpu: 200m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 64Mi + + # -- Redis pod-level security context + # @default -- See [values.yaml] + securityContext: + runAsNonRoot: true + runAsUser: 999 + seccompProfile: + type: RuntimeDefault + + # -- Redis container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + + # -- Redis container port + containerPort: 6379 + # -- Redis service port + servicePort: 6379 + + # -- [Node selector] + nodeSelector: {} + + # -- [Tolerations] for 
use with node taints + tolerations: [] + + # -- Assign custom [affinity] rules to the deployment + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to redis + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Priority class for redis + priorityClassName: "" + + serviceAccount: + # -- Create a service account for the redis pod + create: false + # -- Service account name for redis pod + name: "" + # -- Annotations applied to created service account + annotations: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: false + + service: + # -- Redis service annotations + annotations: {} + # -- Additional redis service labels + labels: {} + + metrics: + # -- Deploy metrics service and redis-exporter sidecar + enabled: false + image: + # -- redis-exporter image repository + repository: public.ecr.aws/bitnami/redis-exporter + # -- redis-exporter image tag + tag: 1.26.0-debian-10-r2 + # -- redis-exporter image PullPolicy + imagePullPolicy: IfNotPresent + # -- Port to use for redis-exporter sidecar + containerPort: 9121 + + # -- Redis exporter security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Resource limits and requests for redis-exporter sidecar + resources: {} + # limits: + # cpu: 50m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + service: + # -- Metrics service type + type: ClusterIP + # -- Metrics service clusterIP. 
`None` makes a "headless service" (no virtual IP) + clusterIP: None + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 9121 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Interval at which metrics should be scraped + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + +# This key configures Redis-HA subchart and when enabled (redis-ha.enabled=true) +# the custom redis deployment is omitted +# Check the redis-ha chart for more properties +redis-ha: + # -- Enables the Redis HA subchart and disables the custom Redis single node deployment + enabled: false + exporter: + # -- If `true`, the prometheus exporter sidecar is enabled + enabled: true + persistentVolume: + # -- Configures persistency on Redis nodes + enabled: false + redis: + # -- Redis convention for naming the cluster group: must match `^[\\w-\\.]+$` and can be templated + masterGroupName: argocd + # -- Any valid redis config options in this section will be applied to each server (see `redis-ha` chart) + # @default -- See [values.yaml] + config: + # -- Will save the DB if both the given number of seconds and the given number of write operations against the DB occurred. 
`""` is disabled + # @default -- `'""'` + save: '""' + haproxy: + # -- Enabled HAProxy LoadBalancing/Proxy + enabled: true + metrics: + # -- HAProxy enable prometheus metric scraping + enabled: true + image: + # -- Redis tag + tag: 7.0.5-alpine + + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + topologySpreadConstraints: + # -- Enable Redis HA topology spread constraints + enabled: false + # -- Max skew of pods tolerated + # @default -- `""` (defaults to `1`) + maxSkew: "" + # -- Topology key for spread + # @default -- `""` (defaults to `topology.kubernetes.io/zone`) + topologyKey: "" + # -- Enforcement policy, hard or soft + # @default -- `""` (defaults to `ScheduleAnyway`) + whenUnsatisfiable: "" + +# External Redis parameters +externalRedis: + # -- External Redis server host + host: "" + # -- External Redis username + username: "" + # -- External Redis password + password: "" + # -- External Redis server port + port: 6379 + # -- The name of an existing secret with Redis credentials (must contain key `redis-password`). + # When it's set, the `externalRedis.password` parameter is ignored + existingSecret: "" + # -- External Redis Secret annotations + secretAnnotations: {} + +## Server +server: + # -- Argo CD server name + name: server + + # -- The number of server pods to run + replicas: 1 + + ## Argo CD server Horizontal Pod Autoscaler + autoscaling: + # -- Enable Horizontal Pod Autoscaler ([HPA]) for the Argo CD server + enabled: false + # -- Minimum number of replicas for the Argo CD server [HPA] + minReplicas: 1 + # -- Maximum number of replicas for the Argo CD server [HPA] + maxReplicas: 5 + # -- Average CPU utilization percentage for the Argo CD server [HPA] + targetCPUUtilizationPercentage: 50 + # -- Average memory utilization percentage for the Argo CD server [HPA] + targetMemoryUtilizationPercentage: 50 + # -- Configures the scaling behavior of the target in both Up and Down directions. 
+ # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + ## Argo CD server Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Argo CD server + enabled: false + # -- Labels to be added to Argo CD server pdb + labels: {} + # -- Annotations to be added to Argo CD server pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `server.pdb.minAvailable` + maxUnavailable: "" + + ## Argo CD server image + image: + # -- Repository to use for the Argo CD server + # @default -- `""` (defaults to global.image.repository) + repository: "" # defaults to global.image.repository + # -- Tag to use for the Argo CD server + # @default -- `""` (defaults to global.image.tag) + tag: "" # defaults to global.image.tag + # -- Image pull policy for the Argo CD server + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" # IfNotPresent + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Additional command line arguments to pass to Argo CD server + extraArgs: [--insecure] + + # -- Environment variables to pass to Argo CD server + env: [] + + # -- envFrom to pass to Argo CD server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Specify 
postStart and preStop lifecycle hooks for your argo-cd-server container + lifecycle: {} + + ## Argo UI extensions + ## This function is in tech preview stage; expect instability or breaking changes in newer versions. + ## Ref: https://github.com/argoproj-labs/argocd-extensions + extensions: + # -- Enable support for Argo UI extensions + enabled: false + + ## Argo UI extensions image + image: + # -- Repository to use for extensions image + repository: "ghcr.io/argoproj-labs/argocd-extensions" + # -- Tag to use for extensions image + tag: "v0.1.0" + # -- Image pull policy for extensions + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Server UI extensions container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Resource limits and requests for the argocd-extensions container + resources: {} + # limits: + # cpu: 50m + # memory: 128Mi + # requests: + # cpu: 10m + # memory: 64Mi + + # -- Additional containers to be added to the server pod + ## See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.
+ extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Init containers to add to the server pod + ## If your target Kubernetes cluster(s) require a custom credential (exec) plugin + ## you could use this (and the same in the application controller pod) to provide such executable + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins + initContainers: [] + # - name: download-tools + # image: alpine:3 + # command: [sh, -c] + # args: + # - wget -qO kubelogin.zip https://github.com/Azure/kubelogin/releases/download/v0.0.25/kubelogin-linux-amd64.zip && + # unzip kubelogin.zip && mv bin/linux_amd64/kubelogin /custom-tools/ + # volumeMounts: + # - mountPath: /custom-tools + # name: custom-tools + + # -- Additional volumeMounts to the server main container + volumeMounts: [] + # - mountPath: /usr/local/bin/kubelogin + # name: custom-tools + # subPath: kubelogin + + # -- Additional volumes to the server pod + volumes: [] + # - name: custom-tools + # emptyDir: {} + + # -- Annotations to be added to server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to server pods + podAnnotations: {} + + # -- Labels to be added to server pods + podLabels: {} + + # -- Resource limits and requests for the Argo CD server + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 50m + # memory: 64Mi + + # -- Configures the server port + containerPort: 8080 + + ## Readiness and 
liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + livenessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + # -- [Node selector] + nodeSelector: {} + # -- [Tolerations] for use with node taints + tolerations: [] + # -- Assign custom [affinity] rules to the deployment + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to the Argo CD server + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Priority class for the Argo CD server + priorityClassName: "" + + # -- Server container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + 
readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # TLS certificate configuration via cert-manager + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server + certificate: + # -- Deploy a Certificate resource (requires cert-manager) + enabled: false + # -- The name of the Secret that will be automatically created and managed by this Certificate resource + secretName: argocd-server-tls + # -- Certificate primary domain (commonName) + domain: argocd.example.com + # -- Certificate Subject Alternate Names (SANs) + additionalHosts: [] + # -- The requested 'duration' (i.e. lifetime) of the certificate. + # @default -- `""` (defaults to 2160h = 90d if not specified) + ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal + duration: "" + # -- How long before the expiry a certificate should be renewed. + # @default -- `""` (defaults to 360h = 15d if not specified) + ## Ref: https://cert-manager.io/docs/usage/certificate/#renewal + renewBefore: "" + # Certificate issuer + ## Ref: https://cert-manager.io/docs/concepts/issuer + issuer: + # -- Certificate issuer group. Set if using an external issuer. Eg. `cert-manager.io` + group: "" + # -- Certificate issuer kind. Either `Issuer` or `ClusterIssuer` + kind: "" + # -- Certificate issuer name. Eg. `letsencrypt` + name: "" + # Private key of the certificate + privateKey: + # -- Rotation policy of private key when certificate is re-issued. Either: `Never` or `Always` + rotationPolicy: Never + # -- The private key cryptography standards (PKCS) encoding for private key. Either: `PKCS1` or `PKCS8` + encoding: PKCS1 + # -- Algorithm used to generate certificate private key. One of: `RSA`, `Ed25519` or `ECDSA` + algorithm: RSA + # -- Key bit size of the private key. If algorithm is set to `Ed25519`, size is ignored.
+ size: 2048 + + # TLS certificate configuration via Secret + ## Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#tls-certificates-used-by-argocd-server + certificateSecret: + # -- Create argocd-server-tls secret + enabled: false + # -- Annotations to be added to argocd-server-tls secret + annotations: {} + # -- Labels to be added to argocd-server-tls secret + labels: {} + # -- Private Key of the certificate + key: '' + # -- Certificate data + crt: '' + + ## Server service configuration + service: + # -- Server service annotations + annotations: {} + # -- Server service labels + labels: {} + # -- Server service type + type: ClusterIP + # -- Server service http port for NodePort service type (only if `server.service.type` is set to "NodePort") + nodePortHttp: 30080 + # -- Server service https port for NodePort service type (only if `server.service.type` is set to "NodePort") + nodePortHttps: 30443 + # -- Server service http port + servicePortHttp: 80 + # -- Server service https port + servicePortHttps: 443 + # -- Server service http port name, can be used to route traffic via istio + servicePortHttpName: http + # -- Server service https port name, can be used to route traffic via istio + servicePortHttpsName: https + # -- Use named target port for argocd + ## Named target ports are not supported by GCE health checks, so when deploying argocd on GKE + ## and exposing it via GCE ingress, the health checks fail and the load balancer returns a 502. + namedTargetPort: true + # -- LoadBalancer will get created with the IP specified in this field + loadBalancerIP: "" + # -- Source IP ranges to allow access to service from + loadBalancerSourceRanges: [] + # -- Server service external IPs + externalIPs: [] + # -- Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + externalTrafficPolicy: "" + # -- Used to maintain session affinity. 
Supports `ClientIP` and `None` + sessionAffinity: "" + + ## Server metrics service configuration + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8083 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # monitoring + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + serviceAccount: + # -- Create server service account + create: true + # -- Server service account name + name: argocd-server + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + ingress: + # -- Enable an ingress resource for the Argo CD server + enabled: false + # -- Additional ingress annotations + annotations: {} + # -- Additional ingress labels + labels: {} + # -- Defines which ingress controller will implement the resource + ingressClassName: "" + + # -- List of ingress hosts + ## Argo Ingress. + ## Hostnames must be provided if Ingress is enabled. 
+ ## Secrets must be manually created in the namespace + hosts: [] + # - argocd.example.com + + # -- List of ingress paths + paths: + - / + # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific` + pathType: Prefix + # -- Additional ingress paths + extraPaths: [] + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + # -- Ingress TLS configuration + tls: [] + # - secretName: your-certificate-name + # hosts: + # - argocd.example.com + + # -- Uses `server.service.servicePortHttps` instead `server.service.servicePortHttp` + https: false + + # dedicated ingress for gRPC as documented at + # Ref: https://argo-cd.readthedocs.io/en/stable/operator-manual/ingress/ + ingressGrpc: + # -- Enable an ingress resource for the Argo CD server for dedicated [gRPC-ingress] + enabled: false + # -- Setup up gRPC ingress to work with an AWS ALB + isAWSALB: false + # -- Additional ingress annotations for dedicated [gRPC-ingress] + annotations: {} + # -- Additional ingress labels for dedicated [gRPC-ingress] + labels: {} + # -- Defines which ingress controller will implement the resource [gRPC-ingress] + ingressClassName: "" + + awsALB: + # -- Service type for the AWS ALB gRPC service + ## Service Type if isAWSALB is set to true + ## Can be of type NodePort or ClusterIP depending on which mode you are + ## are running. Instance mode needs type NodePort, IP mode needs type + ## ClusterIP + ## Ref: https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/how-it-works/#ingress-traffic + serviceType: NodePort + # -- Backend protocol version for the AWS ALB gRPC service + ## This tells AWS to send traffic from the ALB using HTTP2. Can use gRPC as well if you want to leverage gRPC specific features + backendProtocolVersion: HTTP2 + + # -- List of ingress hosts for dedicated [gRPC-ingress] + ## Argo Ingress. + ## Hostnames must be provided if Ingress is enabled. 
+ ## Secrets must be manually created in the namespace + ## + hosts: [] + # - argocd.example.com + + # -- List of ingress paths for dedicated [gRPC-ingress] + paths: + - / + # -- Ingress path type for dedicated [gRPC-ingress]. One of `Exact`, `Prefix` or `ImplementationSpecific` + pathType: Prefix + # -- Additional ingress paths for dedicated [gRPC-ingress] + extraPaths: [] + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + # -- Ingress TLS configuration for dedicated [gRPC-ingress] + tls: [] + # - secretName: your-certificate-name + # hosts: + # - argocd.example.com + + # -- Uses `server.service.servicePortHttps` instead `server.service.servicePortHttp` + https: false + + # Create a OpenShift Route with SSL passthrough for UI and CLI + # Consider setting 'hostname' e.g. https://argocd.apps-crc.testing/ using your Default Ingress Controller Domain + # Find your domain with: kubectl describe --namespace=openshift-ingress-operator ingresscontroller/default | grep Domain: + # If 'hostname' is an empty string "" OpenShift will create a hostname for you. + route: + # -- Enable an OpenShift Route for the Argo CD server + enabled: false + # -- Openshift Route annotations + annotations: {} + # -- Hostname of OpenShift Route + hostname: "" + # -- Termination type of Openshift Route + termination_type: passthrough + # -- Termination policy of Openshift Route + termination_policy: None + + ## Enable Admin ClusterRole resources. + ## Enable if you would like to grant rights to Argo CD to deploy to the local Kubernetes cluster. 
clusterAdminAccess: + # -- Enable RBAC for local cluster deployments + enabled: true + + GKEbackendConfig: + # -- Enable BackendConfig custom resource for Google Kubernetes Engine + enabled: false + # -- [BackendConfigSpec] + spec: {} + # spec: + # iap: + # enabled: true + # oauthclientCredentials: + # secretName: argocd-secret + + ## Create a Google Managed Certificate for use with the GKE Ingress Controller + ## https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs + GKEmanagedCertificate: + # -- Enable ManagedCertificate custom resource for Google Kubernetes Engine. + enabled: false + # -- Domains for the Google Managed Certificate + domains: + - argocd.example.com + + ## Create a Google FrontendConfig Custom Resource, for use with the GKE Ingress Controller + ## https://cloud.google.com/kubernetes-engine/docs/how-to/ingress-features#configuring_ingress_features_through_frontendconfig_parameters + GKEfrontendConfig: + # -- Enable FrontendConfig custom resource for Google Kubernetes Engine + enabled: false + # -- [FrontendConfigSpec] + spec: {} + # spec: + # redirectToHttps: + # enabled: true + # responseCodeName: RESPONSE_CODE + +## Repo Server +repoServer: + # -- Repo server name + name: repo-server + + # -- The number of repo server pods to run + replicas: 1 + + ## Repo server Horizontal Pod Autoscaler + autoscaling: + # -- Enable Horizontal Pod Autoscaler ([HPA]) for the repo server + enabled: false + # -- Minimum number of replicas for the repo server [HPA] + minReplicas: 1 + # -- Maximum number of replicas for the repo server [HPA] + maxReplicas: 5 + # -- Average CPU utilization percentage for the repo server [HPA] + targetCPUUtilizationPercentage: 50 + # -- Average memory utilization percentage for the repo server [HPA] + targetMemoryUtilizationPercentage: 50 + # -- Configures the scaling behavior of the target in both Up and Down directions.
+ # This is only available on HPA apiVersion `autoscaling/v2beta2` and newer + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + + ## Repo server Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the repo server + enabled: false + # -- Labels to be added to repo server pdb + labels: {} + # -- Annotations to be added to repo server pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `repoServer.pdb.minAvailable` + maxUnavailable: "" + + ## Repo server image + image: + # -- Repository to use for the repo server + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the repo server + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the repo server + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Additional command line arguments to pass to repo server + extraArgs: [] + + # -- Environment variables to pass to repo server + env: [] + + # -- envFrom to pass to repo server + # @default -- `[]` (See [values.yaml]) + envFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the repo server pod + ## Ref:
https://argo-cd.readthedocs.io/en/stable/user-guide/config-management-plugins/ + extraContainers: [] + # - name: cmp + # # Entrypoint should be Argo CD lightweight CMP server i.e. argocd-cmp-server + # command: [/var/run/argocd/argocd-cmp-server] + # image: busybox # This can be off-the-shelf or custom-built image + # securityContext: + # runAsNonRoot: true + # runAsUser: 999 + # volumeMounts: + # - mountPath: /var/run/argocd + # name: var-files + # - mountPath: /home/argocd/cmp-server/plugins + # name: plugins + # # Remove this volumeMount if you've chosen to bake the config file into the sidecar image. + # - mountPath: /home/argocd/cmp-server/config/plugin.yaml + # subPath: plugin.yaml + # name: cmp-plugin + # # Starting with v2.4, do NOT mount the same tmp volume as the repo-server container. The filesystem separation helps + # # mitigate path traversal attacks. + # - mountPath: /tmp + # name: cmp-tmp + + # -- Init containers to add to the repo server pods + initContainers: [] + + # -- Additional volumeMounts to the repo server main container + volumeMounts: [] + + # -- Additional volumes to the repo server pod + volumes: [] + # - name: cmp-plugin + # configMap: + # name: cmp-plugin + # - name: cmp-tmp + # emptyDir: {} + + # -- Annotations to be added to repo server Deployment + deploymentAnnotations: {} + + # -- Annotations to be added to repo server pods + podAnnotations: {} + + # -- Labels to be added to repo server pods + podLabels: {} + + # -- Resource limits and requests for the repo server pods + resources: + limits: + cpu: 100m + memory: 256Mi + ephemeral-storage: 2Gi + requests: + cpu: 100m + memory: 256Mi + ephemeral-storage: 2Gi + + # -- Configures the repo server port + containerPort: 8081 + + ## Readiness and liveness probes for default backend + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after 
having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + livenessProbe: + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + + # -- [Node selector] + nodeSelector: {} + # -- [Tolerations] for use with node taints + tolerations: [] + # -- Assign custom [affinity] rules to the deployment + affinity: {} + + # -- Assign custom [TopologySpreadConstraints] rules to the repo server + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## If labelSelector is left out, it will default to the labelSelector configuration of the deployment + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + + # -- Priority class for the repo server + priorityClassName: "" + + # -- Repo server container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # TLS certificate configuration via Secret + ## Ref: 
https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#configuring-tls-to-argocd-repo-server + ## Note: Issuing certificates via cert-manager is not supported right now because it's not possible to restart repo server automatically without extra controllers. + certificateSecret: + # -- Create argocd-repo-server-tls secret + enabled: false + # -- Annotations to be added to argocd-repo-server-tls secret + annotations: {} + # -- Labels to be added to argocd-repo-server-tls secret + labels: {} + # -- Certificate authority. Required for self-signed certificates. + ca: '' + # -- Certificate private key + key: '' + # -- Certificate data. Must contain SANs of Repo service (ie: argocd-repo-server, argocd-repo-server.argo-cd.svc) + crt: '' + + ## Repo server service configuration + service: + # -- Repo server service annotations + annotations: {} + # -- Repo server service labels + labels: {} + # -- Repo server service port + port: 8081 + # -- Repo server service port name + portName: https-repo-server + + ## Repo server metrics service configuration + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8084 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # "monitoring" + # -- Prometheus ServiceMonitor labels + additionalLabels:
{} + # -- Prometheus ServiceMonitor annotations + annotations: {} + + ## Enable Admin ClusterRole resources. + ## Enable if you would like to grant cluster rights to Argo CD repo server. + clusterAdminAccess: + # -- Enable RBAC for local cluster deployments + enabled: false + ## Enable Custom Rules for the Repo server's Cluster Role resource + ## Enable this and set the rules: to whatever custom rules you want for the Cluster Role resource. + ## Defaults to off + clusterRoleRules: + # -- Enable custom rules for the Repo server's Cluster Role resource + enabled: false + # -- List of custom rules for the Repo server's Cluster Role resource + rules: [] + + ## Repo server service account + ## If create is set to true, make sure to uncomment the name and update the rbac section below + serviceAccount: + # -- Create repo server service account + create: true + # -- Repo server service account name + name: "" # "argocd-repo-server" + # -- Annotations applied to created service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- Automount API credentials for the Service Account + automountServiceAccountToken: true + + # -- Repo server rbac rules + rbac: [] + # - apiGroups: + # - argoproj.io + # resources: + # - applications + # verbs: + # - get + # - list + # - watch + +## ApplicationSet controller +applicationSet: + # -- Enable ApplicationSet controller + enabled: true + + # -- ApplicationSet controller name string + name: applicationset-controller + + # -- The number of ApplicationSet controller pods to run + replicaCount: 1 + + ## ApplicationSet controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the ApplicationSet controller + enabled: false + # -- Labels to be added to ApplicationSet controller pdb + labels: {} + # -- Annotations to be added to ApplicationSet controller pdb + annotations: {} + # -- Number of pods that are 
available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailable after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `applicationSet.pdb.minAvailable` + maxUnavailable: "" + + ## ApplicationSet controller image + image: + # -- Repository to use for the ApplicationSet controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the ApplicationSet controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the ApplicationSet controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- If defined, uses a Secret to pull an image from a private Docker registry or repository. + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- ApplicationSet controller log format. Either `text` or `json` + # @default -- `""` (defaults to global.logging.format) + logFormat: "" + # -- ApplicationSet controller log level.
One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + + args: + # -- The default metric address + metricsAddr: :8080 + # -- The default health check port + probeBindAddr: :8081 + # -- How application is synced between the generator and the cluster + policy: sync + # -- Enable dry run mode + dryRun: false + + # -- List of extra cli args to add + extraArgs: [] + + # -- Environment variables to pass to the ApplicationSet controller + extraEnv: [] + # - name: "MY_VAR" + # value: "value" + + # -- envFrom to pass to the ApplicationSet controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- Additional containers to be added to the ApplicationSet controller pod + extraContainers: [] + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + ## Metrics service configuration + metrics: + # -- Deploy metrics service + enabled: false + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port + servicePort: 8085 + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor interval + interval: 30s + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus ServiceMonitor namespace + namespace: "" # monitoring + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus 
ServiceMonitor annotations + annotations: {} + + ## ApplicationSet service configuration + service: + # -- ApplicationSet service annotations + annotations: {} + # -- ApplicationSet service labels + labels: {} + # -- ApplicationSet service port + port: 7000 + # -- ApplicationSet service port name + portName: webhook + + serviceAccount: + # -- Specifies whether a service account should be created + create: true + # -- Annotations to add to the service account + annotations: {} + # -- Labels applied to created service account + labels: {} + # -- The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + + # -- Annotations to be added to ApplicationSet controller Deployment + deploymentAnnotations: {} + + # -- Annotations for the ApplicationSet controller pods + podAnnotations: {} + + # -- Labels for the ApplicationSet controller pods + podLabels: {} + + # -- Resource limits and requests for the ApplicationSet controller pods. 
+ resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- ApplicationSet controller container-level security context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + ## Probes for ApplicationSet controller (optional) + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/ + readinessProbe: + # -- Enable Kubernetes readiness probe for ApplicationSet controller + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + + livenessProbe: + # -- Enable Kubernetes liveness probe for ApplicationSet controller + enabled: false + # -- Number of seconds after the container has started before [probe] is initiated + initialDelaySeconds: 10 + # -- How often (in seconds) to perform the [probe] + periodSeconds: 10 + # -- Number of seconds after which the [probe] times out + timeoutSeconds: 1 + # -- Minimum consecutive successes for the [probe] to be considered successful after having failed + successThreshold: 1 + # -- Minimum consecutive failures for the [probe] to be considered failed after having succeeded + failureThreshold: 3 + + # -- [Node selector] + nodeSelector: {} + + # -- [Tolerations] for use with node taints + tolerations: [] + + # -- Assign custom [affinity] rules + affinity: {} + + # -- If specified, indicates the pod's priority.
If not specified, the pod priority will be default or zero if there is no default. + priorityClassName: "" + + ## Webhook for the Git Generator + ## Ref: https://argocd-applicationset.readthedocs.io/en/master/Generators-Git/#webhook-configuration) + webhook: + ingress: + # -- Enable an ingress resource for Webhooks + enabled: false + # -- Additional ingress annotations + annotations: {} + # -- Additional ingress labels + labels: {} + # -- Defines which ingress ApplicationSet controller will implement the resource + ingressClassName: "" + + # -- List of ingress hosts + ## Hostnames must be provided if Ingress is enabled. + ## Secrets must be manually created in the namespace + hosts: [] + # - argocd-applicationset.example.com + + # -- List of ingress paths + paths: + - /api/webhook + # -- Ingress path type. One of `Exact`, `Prefix` or `ImplementationSpecific` + pathType: Prefix + # -- Additional ingress paths + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## for Kubernetes >=1.19 (when "networking.k8s.io/v1" is used) + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + # -- Ingress TLS configuration + tls: [] + # - secretName: argocd-applicationset-tls + # hosts: + # - argocd-applicationset.example.com + +## Notifications controller +notifications: + # -- Enable notifications controller + enabled: true + + # -- Notifications controller name string + name: notifications-controller + + # -- Argo CD dashboard url; used in place of {{.context.argocdUrl}} in templates + argocdUrl: + + ## Notifications controller Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the notifications controller + enabled: false + # -- Labels to be added to notifications controller pdb + labels: {} + # -- Annotations to be added to notifications controller pdb + 
annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `notifications.pdb.minAvailable` + maxUnavailable: "" + + ## Notifications controller image + image: + # -- Repository to use for the notifications controller + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the notifications controller + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the notifications controller + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + # -- Notifications controller log format. Either `text` or `json` + # @default -- `""` (defaults to global.logging.format) + logFormat: "" + # -- Notifications controller log level. 
One of: `debug`, `info`, `warn`, `error` + # @default -- `""` (defaults to global.logging.level) + logLevel: "" + + # -- Extra arguments to provide to the notifications controller + extraArgs: [] + + # -- Additional container environment variables + extraEnv: [] + + # -- envFrom to pass to the notifications controller + # @default -- `[]` (See [values.yaml]) + extraEnvFrom: [] + # - configMapRef: + # name: config-map-name + # - secretRef: + # name: secret-name + + # -- List of extra mounts to add (normally used with extraVolumes) + extraVolumeMounts: [] + + # -- List of extra volumes to add + extraVolumes: [] + + # -- Define user-defined context + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/#defining-user-defined-context + context: {} + # region: east + # environmentName: staging + + secret: + # -- Whether helm chart creates notifications controller secret + create: true + + # -- key:value pairs of annotations to be added to the secret + annotations: {} + + # -- Generic key:value pairs to be inserted into the secret + ## Can be used for templates, notification services etc. Some examples given below. 
+ ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/ + items: {} + # slack-token: + # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/slack/ + + # grafana-apiKey: + # # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/grafana/ + + # webhooks-github-token: + + # email-username: + # email-password: + # For more information: https://argocd-notifications.readthedocs.io/en/stable/services/email/ + + metrics: + # -- Enables prometheus metrics server + enabled: false + # -- Metrics port + port: 9001 + service: + # -- Metrics service annotations + annotations: {} + # -- Metrics service labels + labels: {} + # -- Metrics service port name + portName: http-metrics + serviceMonitor: + # -- Enable a prometheus ServiceMonitor + enabled: false + # -- Prometheus ServiceMonitor selector + selector: {} + # prometheus: kube-prometheus + # -- Prometheus ServiceMonitor labels + additionalLabels: {} + # -- Prometheus ServiceMonitor annotations + annotations: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + # -- Prometheus ServiceMonitor scheme + scheme: "" + # -- Prometheus ServiceMonitor tlsConfig + tlsConfig: {} + # -- Prometheus [RelabelConfigs] to apply to samples before scraping + relabelings: [] + # -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion + metricRelabelings: [] + + # -- Configures notification services such as slack, email or custom webhook + # @default -- See [values.yaml] + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/services/overview/ + notifiers: {} + # service.slack: | + # token: $slack-token + + # -- Annotations to be applied to the notifications controller Deployment + deploymentAnnotations: {} + + # -- Annotations to be applied to the notifications controller Pods + podAnnotations: {} + + # -- Labels to be applied to the notifications controller Pods + 
podLabels: {} + + # -- Resource limits and requests for the notifications controller + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Notification controller container-level security Context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- [Node selector] + nodeSelector: {} + + # -- [Tolerations] for use with node taints + tolerations: [] + + # -- Assign custom [affinity] rules + affinity: {} + + # -- Priority class for the notifications controller pods + priorityClassName: "" + + serviceAccount: + # -- Specifies whether a service account should be created + create: true + + # -- The name of the service account to use. + ## If not set and create is true, a name is generated using the fullname template + name: argocd-notifications-controller + + # -- Annotations applied to created service account + annotations: {} + + # -- Labels applied to created service account + labels: {} + cm: + # -- Whether helm chart creates notifications controller config map + create: true + + # -- Contains centrally managed global application subscriptions + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/subscriptions/ + subscriptions: [] + # # subscription for on-sync-status-unknown trigger notifications + # - recipients: + # - slack:test2 + # - email:test@gmail.com + # triggers: + # - on-sync-status-unknown + # # subscription restricted to applications with matching labels only + # - recipients: + # - slack:test3 + # selector: test=true + # triggers: + # - on-sync-status-unknown + + # -- The notification template is used to generate the notification content + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/templates/ + templates: {} + # template.app-deployed: | + # email: + # 
subject: New version of an application {{.app.metadata.name}} is up and running. + # message: | + # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} is now running new version of deployments manifests. + # slack: + # attachments: | + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#18be52", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # }, + # { + # "title": "Revision", + # "value": "{{.app.status.sync.revision}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-health-degraded: | + # email: + # subject: Application {{.app.metadata.name}} has degraded. + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} has degraded. + # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. 
+ # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link": "{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#f4c030", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-failed: | + # email: + # subject: Failed to sync application {{.app.metadata.name}}. + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} The sync operation of application {{.app.metadata.name}} has failed at {{.app.status.operationState.finishedAt}} with the following error: {{.app.status.operationState.message}} + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-running: | + # email: + # subject: Start syncing application {{.app.metadata.name}}. + # message: | + # The sync operation of application {{.app.metadata.name}} has started at {{.app.status.operationState.startedAt}}. 
+ # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#0DADEA", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-status-unknown: | + # email: + # subject: Application {{.app.metadata.name}} sync status is 'Unknown' + # message: | + # {{if eq .serviceType "slack"}}:exclamation:{{end}} Application {{.app.metadata.name}} sync is 'Unknown'. + # Application details: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}. + # {{if ne .serviceType "slack"}} + # {{range $c := .app.status.conditions}} + # * {{$c.message}} + # {{end}} + # {{end}} + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#E96D76", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + # template.app-sync-succeeded: | + # email: + # subject: Application {{.app.metadata.name}} has been successfully synced. 
+ # message: | + # {{if eq .serviceType "slack"}}:white_check_mark:{{end}} Application {{.app.metadata.name}} has been successfully synced at {{.app.status.operationState.finishedAt}}. + # Sync operation details are available at: {{.context.argocdUrl}}/applications/{{.app.metadata.name}}?operation=true . + # slack: + # attachments: |- + # [{ + # "title": "{{ .app.metadata.name}}", + # "title_link":"{{.context.argocdUrl}}/applications/{{.app.metadata.name}}", + # "color": "#18be52", + # "fields": [ + # { + # "title": "Sync Status", + # "value": "{{.app.status.sync.status}}", + # "short": true + # }, + # { + # "title": "Repository", + # "value": "{{.app.spec.source.repoURL}}", + # "short": true + # } + # {{range $index, $c := .app.status.conditions}} + # {{if not $index}},{{end}} + # {{if $index}},{{end}} + # { + # "title": "{{$c.type}}", + # "value": "{{$c.message}}", + # "short": true + # } + # {{end}} + # ] + # }] + + # -- The trigger defines the condition when the notification should be sent + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/ + triggers: {} + # trigger.on-deployed: | + # - description: Application is synced and healthy. Triggered once per commit. 
+ # oncePer: app.status.sync.revision + # send: + # - app-deployed + # when: app.status.operationState.phase in ['Succeeded'] and app.status.health.status == 'Healthy' + # trigger.on-health-degraded: | + # - description: Application has degraded + # send: + # - app-health-degraded + # when: app.status.health.status == 'Degraded' + # trigger.on-sync-failed: | + # - description: Application syncing has failed + # send: + # - app-sync-failed + # when: app.status.operationState.phase in ['Error', 'Failed'] + # trigger.on-sync-running: | + # - description: Application is being synced + # send: + # - app-sync-running + # when: app.status.operationState.phase in ['Running'] + # trigger.on-sync-status-unknown: | + # - description: Application status is 'Unknown' + # send: + # - app-sync-status-unknown + # when: app.status.sync.status == 'Unknown' + # trigger.on-sync-succeeded: | + # - description: Application syncing has succeeded + # send: + # - app-sync-succeeded + # when: app.status.operationState.phase in ['Succeeded'] + # + # For more information: https://argocd-notifications.readthedocs.io/en/stable/triggers/#default-triggers + # defaultTriggers: | + # - on-sync-status-unknown + + ## The optional bot component simplifies managing subscriptions + ## For more information: https://argocd-notifications.readthedocs.io/en/stable/bots/overview/ + bots: + slack: + # -- Enable slack bot + ## You have to set secret.notifiers.slack.signingSecret + enabled: false + + ## Slack bot Pod Disruption Budget + ## Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ + pdb: + # -- Deploy a [PodDisruptionBudget] for the Slack bot + enabled: false + # -- Labels to be added to Slack bot pdb + labels: {} + # -- Annotations to be added to Slack bot pdb + annotations: {} + # -- Number of pods that are available after eviction as number or percentage (eg.: 50%) + # @default -- `""` (defaults to 0 if not specified) + minAvailable: "" + # -- Number of pods that are unavailble 
after eviction as number or percentage (eg.: 50%). + ## Has higher precedence over `notifications.bots.slack.pdb.minAvailable` + maxUnavailable: "" + + ## Slack bot image + image: + # -- Repository to use for the Slack bot + # @default -- `""` (defaults to global.image.repository) + repository: "" + # -- Tag to use for the Slack bot + # @default -- `""` (defaults to global.image.tag) + tag: "" + # -- Image pull policy for the Slack bot + # @default -- `""` (defaults to global.image.imagePullPolicy) + imagePullPolicy: "" + + # -- Secrets with credentials to pull images from a private registry + # @default -- `[]` (defaults to global.imagePullSecrets) + imagePullSecrets: [] + + service: + # -- Service annotations for Slack bot + annotations: {} + # -- Service port for Slack bot + port: 80 + # -- Service type for Slack bot + type: LoadBalancer + + serviceAccount: + # -- Specifies whether a service account should be created + create: true + + # -- The name of the service account to use. + ## If not set and create is true, a name is generated using the fullname template + name: argocd-notifications-bot + + # -- Annotations applied to created service account + annotations: {} + + # -- Slack bot container-level security Context + # @default -- See [values.yaml] + containerSecurityContext: + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + + # -- Resource limits and requests for the Slack bot + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + # -- Assign custom [affinity] rules + affinity: {} + + # -- [Tolerations] for use with node taints + tolerations: [] + + # -- [Node selector] + nodeSelector: {} \ No newline at end of file diff --git a/kube/services/datadog/datadog-application.yaml b/kube/services/datadog/datadog-application.yaml new file mode 100644 index 000000000..f5a8925e1 --- /dev/null +++ 
b/kube/services/datadog/datadog-application.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: datadog-application + namespace: argocd +spec: + project: default + source: + chart: datadog + repoURL: 'https://helm.datadoghq.com' + targetRevision: 3.6.4 + helm: + valueFiles: + - https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/kube/services/datadog/values.yaml + releaseName: datadog + destination: + server: 'https://kubernetes.default.svc' + namespace: datadog + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/kube/services/monitoring/prometheus-application.yaml b/kube/services/monitoring/prometheus-application.yaml new file mode 100644 index 000000000..75b085719 --- /dev/null +++ b/kube/services/monitoring/prometheus-application.yaml @@ -0,0 +1,24 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: prometheus-application + namespace: argocd +spec: + project: default + source: + chart: kube-prometheus-stack + repoURL: https://prometheus-community.github.io/helm-charts + targetRevision: 43.1.3 + helm: + valueFiles: + - https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/kube/services/monitoring/values.yaml + releaseName: prometheus + destination: + server: 'https://kubernetes.default.svc' + namespace: monitoring + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true \ No newline at end of file diff --git a/kube/services/monitoring/prometheus-values.yaml b/kube/services/monitoring/prometheus-values.yaml index 9ae425abd..e49bfba09 100644 --- a/kube/services/monitoring/prometheus-values.yaml +++ b/kube/services/monitoring/prometheus-values.yaml @@ -1261,4 +1261,4 @@ extraScrapeConfigs: networkPolicy: ## Enable creation of NetworkPolicy resources. 
## - enabled: false + enabled: false \ No newline at end of file diff --git a/kube/services/monitoring/thanos-deploy.yaml b/kube/services/monitoring/thanos-deploy.yaml index 8ff7a54e4..a63cabd10 100644 --- a/kube/services/monitoring/thanos-deploy.yaml +++ b/kube/services/monitoring/thanos-deploy.yaml @@ -98,6 +98,7 @@ spec: labels: app: thanos-store spec: + serviceAccount: thanos containers: - name: thanos-store image: quay.io/thanos/thanos:v0.25.2 @@ -162,6 +163,7 @@ spec: labels: app: thanos-compactor spec: + serviceAccount: thanos containers: - name: thanos-compactor image: quay.io/thanos/thanos:v0.25.2 @@ -217,4 +219,4 @@ spec: interval: 30s selector: matchLabels: - app: thanos-compactor + app: thanos-compactor \ No newline at end of file diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml index 761764c89..c29e072bc 100644 --- a/kube/services/monitoring/values.yaml +++ b/kube/services/monitoring/values.yaml @@ -756,7 +756,7 @@ grafana: hosts: [] ## Path for grafana ingress - path: / + path: /grafana/ ## TLS configuration for grafana Ingress ## Secret must be manually created in the namespace @@ -2362,7 +2362,7 @@ prometheus: ## External URL at which Prometheus will be reachable. ## - externalUrl: "" + externalUrl: "/prometheus/" ## Define which Nodes the Pods are scheduled on. 
## ref: https://kubernetes.io/docs/user-guide/node-selection/ diff --git a/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf b/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf index a9197eec6..a44263b75 100644 --- a/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf +++ b/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf @@ -7,8 +7,10 @@ auth_request /gen3-authz; set $proxy_service "prometheus"; - set $upstream http://prometheus-server.prometheus.svc.cluster.local; - #rewrite ^/prometheus/(.*) /$1 break; + set $upstream http://prometheus-kube-prometheus-prometheus.monitoring.svc.cluster.local:9090; + + rewrite ^/prometheus/(.*) /$1 break; + proxy_pass $upstream; #proxy_redirect http://$host/ https://$host/prometheus/; } From 53fdff0ddc090d983f371ed0fb51e4c9220e2ee4 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 8 Feb 2023 13:11:46 -0600 Subject: [PATCH 069/362] Add more logging to kube-setup-karpenter (#2147) --- gen3/bin/kube-setup-autoscaler.sh | 2 +- gen3/bin/kube-setup-karpenter.sh | 29 ++++++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-autoscaler.sh b/gen3/bin/kube-setup-autoscaler.sh index 00d5dc4e7..8aeff8b5b 100644 --- a/gen3/bin/kube-setup-autoscaler.sh +++ b/gen3/bin/kube-setup-autoscaler.sh @@ -73,7 +73,7 @@ function get_autoscaler_version(){ function deploy() { if [["$ctxNamespace" == "default" || "$ctxNamespace" == "null"]]; then - if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1) || [[ "$FORCE" == true]]; then + if (! g3kubectl --namespace=kube-system get deployment cluster-autoscaler > /dev/null 2>&1 || "${FORCE}" == true); then if ! 
[ -z ${CAS_VERSION} ]; then casv=${CAS_VERSION} diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 50d3a4590..925eebaea 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -1,6 +1,6 @@ #!/bin/bash -#set -i +set -e source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" @@ -9,13 +9,15 @@ ctx="$(g3kubectl config current-context)" ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" gen3_deploy_karpenter() { + gen3_log_info "Deploying karpenter" # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy - if [[( -z $(g3kubectl get namespaces | grep karpenter) || $FORCE ) && ("$ctxNamespace" == "default" || "$ctxNamespace" == "null")]]; then + if [[( -z $(g3kubectl get namespaces | grep karpenter) || $FORCE == "true" ) && ("$ctxNamespace" == "default" || "$ctxNamespace" == "null")]]; then + gen3_log_info "Ensuring that the spot instance service linked role is setup" # Ensure the spot instance service linked role is setup # It is required for running spot instances aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true karpenter=${karpenter:-v0.22.0} - echo '{b + echo '{ "Statement": [ { "Action": [ @@ -56,11 +58,21 @@ gen3_deploy_karpenter() { "Version": "2012-10-17" }' > controller-policy.json + gen3_log_info "Creating karpenter namespace" g3kubectl create namespace karpenter 2> /dev/null || true + + gen3_log_info "Creating karpenter AWS role and k8s service accounts" gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true # Have to delete SA because helm chart will create the SA and there will be a conflict + + gen3_log_info "Have to delete SA because helm chart will create the SA and there will be a conflict" g3kubectl delete sa karpenter -n karpenter + + + gen3_log_info "aws iam put-role-policy --role-name 
"karpenter-controller-role-$vpc_name" --policy-document file://controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true" aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true + + gen3_log_info "Need to tag the subnets/sg's so that karpenter can discover them automatically" # Need to tag the subnets/sg's so that karpenter can discover them automatically subnets=$(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_private_*' --query 'Subnets[].SubnetId' --output text) security_groups=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' --output text) @@ -73,21 +85,32 @@ gen3_deploy_karpenter() { aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-worfklow" --resources ${security_groups_workflow} + + gen3_log_info "Installing karpenter using helm" + helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter \ --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ --set settings.aws.clusterEndpoint="${cluster_endpoint}" \ --set settings.aws.clusterName=${vpc_name} \ --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::$(aws sts get-caller-identity --output text --query 'Account'):role/gen3_service/karpenter-controller-role-${vpc_name}" + + gen3_log_info "sleep for a little bit so CRD's can be created for the provisioner/node template" # sleep for a little bit so CRD's can be created for the provisioner/node template sleep 10 + gen3_log_info "Deploy AWS node termination handler so that spot instances can be preemptively spun up before old instances 
stop" # Deploy AWS node termination handler so that spot instances can be preemptively spun up before old instances stop kubectl apply -f https://github.com/aws/aws-node-termination-handler/releases/download/v1.18.1/all-resources.yaml fi + + gen3_log_info "Remove cluster-autoscaler" gen3 kube-setup-autoscaler --remove + + gen3_log_info "Adding node templates for karpenter" g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateWorkflow.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - if [[ $ARM ]]; then + gen3_log_info "Deploy binfmt daemonset so the emulation tools run on arm nodes" # Deploy binfmt daemonset so the emulation tools run on arm nodes g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/binfmt.yaml g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerArm.yaml From 41aa85dee34382d76616e50d54fa48f65b2e227b Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 9 Feb 2023 13:45:19 -0700 Subject: [PATCH 070/362] /grafana endpoint (#2145) * changing the values for grafana so it can be served behind /grafana prefix * correcting annotation synthax issue * making hosts a list * adding the proper configuration, so we can reach the /grafana endpoint * attempting to get the alertmanager endpoint working * adding an ingress class for alertmanager * adding annotations for the alertmanager ingress * adding a route prefix for altermanager * changing the ingress defaults back for altermanager * changing the routePrefix to / and adding /alertmanager endpoint to externalURL * adding nginx config and container flags to thanos-compactor and thanos-query, so their endpoint can be reached * correcting the externalURL and routePrefix for altermanager * removing 
rewrite rules for thanos compactor and query * correcting the port number for thanos-compactor * attempting to remove "hosts" to see if grafana endpoint will still work * had to delete routePrefix and add the rewrite rules back to get the thanos-compactor endpoint working * adding kube-setup-prometheus to roll all so prometheus is deployed via argocd. However, we only want this to run if argocd is set to true in the manifest.json to prevent previous prometheus setups from being destroyed. --- gen3/bin/kube-roll-all.sh | 3 + kube/services/monitoring/thanos-deploy.yaml | 3 + kube/services/monitoring/values.yaml | 32 +++++----- .../gen3.nginx.conf/prometheus-server.conf | 61 ++++++++++++++++++- 4 files changed, 84 insertions(+), 15 deletions(-) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 70af01b36..68a0bd47d 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -246,6 +246,9 @@ fi gen3 kube-setup-revproxy if [[ "$GEN3_ROLL_FAST" != "true" ]]; then + if g3k_manifest_lookup .global.argocd 2> /dev/null; then + gen3 kube-setup-prometheus + fi # Internal k8s systems gen3 kube-setup-fluentd & # If there is an entry for karpenter in the manifest setup karpenter diff --git a/kube/services/monitoring/thanos-deploy.yaml b/kube/services/monitoring/thanos-deploy.yaml index a63cabd10..f5c07a656 100644 --- a/kube/services/monitoring/thanos-deploy.yaml +++ b/kube/services/monitoring/thanos-deploy.yaml @@ -25,6 +25,8 @@ spec: - '--log.level=debug' - '--query.replica-label=prometheus_replica' - '--store=prometheus-kube-prometheus-thanos-discovery.monitoring.svc:10901' + - '--web.external-prefix=/thanos-query/' + - '--web.route-prefix=/thanos-query/' resources: requests: cpu: '100m' @@ -173,6 +175,7 @@ spec: - '--data-dir=/var/thanos/store' - '--objstore.config-file=/config/thanos.yaml' - '--wait' + - '--web.external-prefix=/thanos-compactor/' ports: - name: http containerPort: 10902 diff --git a/kube/services/monitoring/values.yaml 
b/kube/services/monitoring/values.yaml index c29e072bc..e033d8801 100644 --- a/kube/services/monitoring/values.yaml +++ b/kube/services/monitoring/values.yaml @@ -249,9 +249,7 @@ alertmanager: # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - - annotations: {} + #ingressClassName: nginx labels: {} @@ -261,8 +259,8 @@ alertmanager: ## Hosts must be provided if Ingress is enabled. ## hosts: [] - #- prometheus.emalinowskiv1.planx-pla.net - # - alertmanager.domain.com + #- prometheus.emalinowskiv1.planx-pla.net + # - alertmanager.domain.com ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix ## @@ -562,7 +560,7 @@ alertmanager: ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. ## - routePrefix: / + routePrefix: /alertmanager/ ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. ## @@ -731,18 +729,19 @@ grafana: ingress: ## If true, Grafana Ingress will be created ## - enabled: false + enabled: true ## IngressClassName for Grafana Ingress. ## Should be provided if Ingress is enable. 
## - # ingressClassName: nginx + ingressClassName: nginx ## Annotations for Grafana Ingress ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" + annotations: + kubernetes.io/ingress.class: nginx + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" ## Labels to be added to the Ingress ## @@ -754,9 +753,14 @@ grafana: # hosts: # - grafana.domain.com hosts: [] + # - data.bloodpac.org ## Path for grafana ingress - path: /grafana/ + path: /grafana/?(.*) + + grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost ## TLS configuration for grafana Ingress ## Secret must be manually created in the namespace @@ -3361,12 +3365,12 @@ thanosRuler: ## The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs. This is necessary if Thanos Ruler is not served from root of a DNS name. string false ## - externalPrefix: + externalPrefix: /thanos ## The route prefix ThanosRuler registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, ## but the server serves requests under a different route prefix. For example for use with kubectl proxy. ## - routePrefix: / + routePrefix: ## ObjectStorageConfig configures object storage in Thanos. Alternative to ## ObjectStorageConfigFile, and lower order priority. 
diff --git a/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf b/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf index a44263b75..c936f541e 100644 --- a/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf +++ b/kube/services/revproxy/gen3.nginx.conf/prometheus-server.conf @@ -12,5 +12,64 @@ rewrite ^/prometheus/(.*) /$1 break; proxy_pass $upstream; - #proxy_redirect http://$host/ https://$host/prometheus/; } + location /grafana/ { + error_page 403 @errorworkspace; + set $authz_resource "/prometheus"; + set $authz_method "access"; + set $authz_service "prometheus"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + proxy_set_header Host $http_host; + + set $proxy_service "grafana"; + set $upstream http://prometheus-grafana.monitoring.svc.cluster.local; + + rewrite ^/grafana/(.*) /$1 break; + + proxy_pass $upstream; + } + location /alertmanager/ { + error_page 403 @errorworkspace; + set $authz_resource "/prometheus"; + set $authz_method "access"; + set $authz_service "prometheus"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + set $proxy_service "alertmanager"; + set $upstream http://alertmanager-operated.monitoring.svc.cluster.local:9093; + + #rewrite ^/alertmanager/(.*) /$1 break; + + proxy_pass $upstream; + } + location /thanos-query/ { + error_page 403 @errorworkspace; + set $authz_resource "/prometheus"; + set $authz_method "access"; + set $authz_service "prometheus"; + # be careful - sub-request runs in same context as this request + auth_request /gen3-authz; + + set $proxy_service "thanos-query"; + set $upstream http://thanos-query.monitoring.svc.cluster.local:9090; + + proxy_pass $upstream; + } + location /thanos-compactor/ { + error_page 403 @errorworkspace; + set $authz_resource "/prometheus"; + set $authz_method "access"; + set $authz_service "prometheus"; + # be careful - sub-request runs in same context as this request + 
auth_request /gen3-authz; + + set $proxy_service "thanos-compactor"; + set $upstream http://thanos-compactor.monitoring.svc.cluster.local:10902; + + rewrite ^/thanos-compactor/(.*) /$1 break; + + proxy_pass $upstream; + } \ No newline at end of file From d02945a65249057af74266a368e8034d7e3e4d4c Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 9 Feb 2023 15:25:34 -0700 Subject: [PATCH 071/362] Prometheus Endpoint (#2148) * removing the logic to only grab the prometheus nginx config if in the default namespaces so we can access the prometheus endpoint in staging environments * commenting out all pieces of the old grafana setup --- gen3/bin/kube-setup-prometheus.sh | 102 +++++++++--------- gen3/bin/kube-setup-revproxy.sh | 49 ++++----- .../revproxy/gen3.nginx.conf/grafana.conf | 30 +++--- 3 files changed, 89 insertions(+), 92 deletions(-) diff --git a/gen3/bin/kube-setup-prometheus.sh b/gen3/bin/kube-setup-prometheus.sh index 965cb5eb6..240181be0 100644 --- a/gen3/bin/kube-setup-prometheus.sh +++ b/gen3/bin/kube-setup-prometheus.sh @@ -32,25 +32,25 @@ function delete_prometheus() gen3 arun helm delete prometheus --namespace prometheus } -function delete_grafana() -{ - gen3 arun helm delete grafana --namespace grafana -} - -function create_grafana_secrets() -{ - if ! g3kubectl get secrets/grafana-admin > /dev/null 2>&1; then - credsFile=$(mktemp -p "$XDG_RUNTIME_DIR" "creds.json_XXXXXX") - creds="$(base64 /dev/urandom | head -c 12)" - if [[ "$creds" != null ]]; then - echo ${creds} >> "$credsFile" - g3kubectl create secret generic grafana-admin "--from-file=credentials=${credsFile}" - rm -f ${credsFile} - else - echo "WARNING: there was an error creating the secrets for grafana" - fi - fi -} +# function delete_grafana() +# { +# gen3 arun helm delete grafana --namespace grafana +# } + +# function create_grafana_secrets() +# { +# if ! 
g3kubectl get secrets/grafana-admin > /dev/null 2>&1; then +# credsFile=$(mktemp -p "$XDG_RUNTIME_DIR" "creds.json_XXXXXX") +# creds="$(base64 /dev/urandom | head -c 12)" +# if [[ "$creds" != null ]]; then +# echo ${creds} >> "$credsFile" +# g3kubectl create secret generic grafana-admin "--from-file=credentials=${credsFile}" +# rm -f ${credsFile} +# else +# echo "WARNING: there was an error creating the secrets for grafana" +# fi +# fi +# } function deploy_prometheus() { @@ -87,36 +87,36 @@ function deploy_prometheus() } -function deploy_grafana() -{ - helm_repository - if (! g3kubectl get namespace grafana > /dev/null 2>&1); - then - g3kubectl create namespace grafana - g3kubectl label namespace grafana app=grafana - fi - - #create_grafana_secrets - TMPGRAFANAVALUES=$(mktemp -p "$XDG_RUNTIME_DIR" "grafana.json_XXXXXX") - ADMINPASS=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d) - yq '.adminPassword = "'${ADMINPASS}'"' "${GEN3_HOME}/kube/services/monitoring/grafana-values.yaml" --yaml-output > ${TMPGRAFANAVALUES} - # curl -o grafana-values.yaml https://raw.githubusercontent.com/helm/charts/master/stable/grafana/values.yaml - - if (! g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then - if ( g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1); - then - delete_grafana - fi +# function deploy_grafana() +# { +# helm_repository +# if (! 
g3kubectl get namespace grafana > /dev/null 2>&1); +# then +# g3kubectl create namespace grafana +# g3kubectl label namespace grafana app=grafana +# fi + +# #create_grafana_secrets +# TMPGRAFANAVALUES=$(mktemp -p "$XDG_RUNTIME_DIR" "grafana.json_XXXXXX") +# ADMINPASS=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d) +# yq '.adminPassword = "'${ADMINPASS}'"' "${GEN3_HOME}/kube/services/monitoring/grafana-values.yaml" --yaml-output > ${TMPGRAFANAVALUES} +# # curl -o grafana-values.yaml https://raw.githubusercontent.com/helm/charts/master/stable/grafana/values.yaml + +# if (! g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1) || [[ "$1" == "--force" ]]; then +# if ( g3kubectl --namespace=grafana get deployment grafana > /dev/null 2>&1); +# then +# delete_grafana +# fi - local HOSTNAME - HOSTNAME=$(gen3 api hostname) +# local HOSTNAME +# HOSTNAME=$(gen3 api hostname) - g3k_kv_filter "${TMPGRAFANAVALUES}" DOMAIN ${HOSTNAME} | gen3 arun helm upgrade --install grafana stable/grafana --namespace grafana -f - - gen3 kube-setup-revproxy - else - echo "Grafana is already installed, use --force to try redeploying" - fi -} +# g3k_kv_filter "${TMPGRAFANAVALUES}" DOMAIN ${HOSTNAME} | gen3 arun helm upgrade --install grafana stable/grafana --namespace grafana -f - +# gen3 kube-setup-revproxy +# else +# echo "Grafana is already installed, use --force to try redeploying" +# fi +# } function deploy_thanos() { if [[ -z $vpc_name ]]; then @@ -145,11 +145,11 @@ case "$command" in prometheus) deploy_prometheus "$@" ;; - grafana) - deploy_grafana "$@" - ;; + # grafana) + # deploy_grafana "$@" + # ;; *) deploy_prometheus "$@" - deploy_grafana "$@" + # deploy_grafana "$@" ;; esac diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 307acaecf..9d60c62cb 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -130,35 +130,32 @@ then fi fi -if [[ $current_namespace == "default" 
]]; +if g3kubectl get namespace monitoring > /dev/null 2>&1; then - if g3kubectl get namespace monitoring > /dev/null 2>&1; - then - filePath="$scriptDir/gen3.nginx.conf/prometheus-server.conf" - if [[ -f "$filePath" ]]; then - confFileList+=("--from-file" "$filePath") - fi - fi + filePath="$scriptDir/gen3.nginx.conf/prometheus-server.conf" + if [[ -f "$filePath" ]]; then + confFileList+=("--from-file" "$filePath") + fi fi -#echo "${confFileList[@]}" $BASHPID -if [[ $current_namespace == "default" ]]; then - if g3kubectl get namespace grafana > /dev/null 2>&1; then - for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}'); - do - filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf" - touch "${XDG_RUNTIME_DIR}/${grafana}.conf" - tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf" - adminPass=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d) - adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0) - sed "s/CREDS/${adminCred}/" ${filePath} > ${tmpCredsFile} - if [[ -f "${tmpCredsFile}" ]]; then - confFileList+=("--from-file" "${tmpCredsFile}") - fi - #rm -f ${tmpCredsFile} - done - fi -fi +# #echo "${confFileList[@]}" $BASHPID +# if [[ $current_namespace == "default" ]]; then +# if g3kubectl get namespace grafana > /dev/null 2>&1; then +# for grafana in $(g3kubectl get services -n grafana -o jsonpath='{.items[*].metadata.name}'); +# do +# filePath="$scriptDir/gen3.nginx.conf/${grafana}.conf" +# touch "${XDG_RUNTIME_DIR}/${grafana}.conf" +# tmpCredsFile="${XDG_RUNTIME_DIR}/${grafana}.conf" +# adminPass=$(g3kubectl get secrets grafana-admin -o json |jq .data.credentials -r |base64 -d) +# adminCred=$(echo -n "admin:${adminPass}" | base64 --wrap=0) +# sed "s/CREDS/${adminCred}/" ${filePath} > ${tmpCredsFile} +# if [[ -f "${tmpCredsFile}" ]]; then +# confFileList+=("--from-file" "${tmpCredsFile}") +# fi +# #rm -f ${tmpCredsFile} +# done +# fi +# fi if g3k_manifest_lookup .global.document_url > 
/dev/null 2>&1; then documentUrl="$(g3k_manifest_lookup .global.document_url)" diff --git a/kube/services/revproxy/gen3.nginx.conf/grafana.conf b/kube/services/revproxy/gen3.nginx.conf/grafana.conf index 93e50798d..a78b0684f 100644 --- a/kube/services/revproxy/gen3.nginx.conf/grafana.conf +++ b/kube/services/revproxy/gen3.nginx.conf/grafana.conf @@ -1,17 +1,17 @@ - location /grafana/ { - error_page 403 @errorworkspace; - set $authz_resource "/prometheus"; - set $authz_method "access"; - set $authz_service "prometheus"; - # be careful - sub-request runs in same context as this request - auth_request /gen3-authz; + # location /grafana/ { + # error_page 403 @errorworkspace; + # set $authz_resource "/prometheus"; + # set $authz_method "access"; + # set $authz_service "prometheus"; + # # be careful - sub-request runs in same context as this request + # auth_request /gen3-authz; - proxy_set_header Host $host; - proxy_set_header Authorization "Basic CREDS"; + # proxy_set_header Host $host; + # proxy_set_header Authorization "Basic CREDS"; - set $proxy_service "grafana"; - set $upstream http://grafana.grafana.svc.cluster.local; - rewrite ^/grafana/(.*) /$1 break; - proxy_pass $upstream; - #proxy_redirect http://$host/ https://$host/grafana/; - } + # set $proxy_service "grafana"; + # set $upstream http://grafana.grafana.svc.cluster.local; + # rewrite ^/grafana/(.*) /$1 break; + # proxy_pass $upstream; + # #proxy_redirect http://$host/ https://$host/grafana/; + # } From 203909d12331f0665f4b869c8312d789c130de7a Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 14 Feb 2023 14:11:09 -0600 Subject: [PATCH 072/362] PXP-10558 `gen3 kube-setup-pelicanjob` creates Fence client (#2153) --- .secrets.baseline | 2 +- gen3/bin/kube-setup-pelicanjob.sh | 24 +++++++++++++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index baa9cc4fb..2b143a495 100644 --- 
a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-02-01T22:29:59Z", + "generated_at": "2023-02-03T22:39:02Z", "plugins_used": [ { "name": "AWSKeyDetector" diff --git a/gen3/bin/kube-setup-pelicanjob.sh b/gen3/bin/kube-setup-pelicanjob.sh index 930985cb8..907b9f045 100644 --- a/gen3/bin/kube-setup-pelicanjob.sh +++ b/gen3/bin/kube-setup-pelicanjob.sh @@ -24,14 +24,36 @@ if ! g3kubectl describe secret pelicanservice-g3auto | grep config.json > /dev/n user=$(gen3 secrets decode $awsuser-g3auto awsusercreds.json) key_id=$(jq -r .id <<< $user) access_key=$(jq -r .secret <<< $user) + + # setup fence OIDC client with client_credentials grant for access to MDS API + hostname=$(gen3 api hostname) + gen3_log_info "kube-setup-sower-jobs" "creating fence oidc client for $hostname" + secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client pelican-export-job --grant-types client_credentials | tail -1) + # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') + if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then + # try delete client + g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client pelican-export-job > /dev/null 2>&1 + secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client pelican-export-job --grant-types client_credentials | tail -1) + if [[ ! 
$secrets =~ (\'(.*)\', \'(.*)\') ]]; then + gen3_log_err "kube-setup-sower-jobs" "Failed generating oidc client: $secrets" + return 1 + fi + fi + pelican_export_client_id="${BASH_REMATCH[2]}" + pelican_export_client_secret="${BASH_REMATCH[3]}" + cat - > "$credsFile" < Date: Wed, 15 Feb 2023 09:32:43 -0600 Subject: [PATCH 073/362] fix(karpenter-scheduling): Updated kubernetes manifests to work with karpenter labels as well as ASG labels (#2149) Co-authored-by: Edward Malinowski --- .secrets.baseline | 4 ++-- kube/services/access-backend/access-backend-deploy.yaml | 9 ++++++++- .../services/ambassador-gen3/ambassador-gen3-deploy.yaml | 9 ++++++++- kube/services/ambassador/ambassador-deploy.yaml | 9 ++++++++- kube/services/ambtest/ambtest-deploy.yaml | 9 ++++++++- kube/services/arborist/arborist-deploy-2.yaml | 9 ++++++++- kube/services/arborist/arborist-deploy.yaml | 9 ++++++++- kube/services/argo-wrapper/argo-wrapper-deploy.yaml | 9 ++++++++- .../arranger-dashboard/arranger-dashboard-deploy.yaml | 9 ++++++++- kube/services/arranger/arranger-deploy.yaml | 9 ++++++++- kube/services/audit-service/audit-service-deploy.yaml | 9 ++++++++- kube/services/auspice/auspice-deploy.yaml | 9 ++++++++- kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml | 7 +++++++ kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml | 9 ++++++++- kube/services/cogwheel/cogwheel-deploy.yaml | 9 ++++++++- .../cohort-middleware/cohort-middleware-deploy.yaml | 9 ++++++++- kube/services/dashboard/dashboard-deploy.yaml | 9 ++++++++- kube/services/datasim/datasim-deploy.yaml | 9 ++++++++- kube/services/dicom-server/dicom-server-deploy.yaml | 9 ++++++++- kube/services/dicom-viewer/dicom-viewer-deploy.yaml | 9 ++++++++- kube/services/fence/fence-canary-deploy.yaml | 9 ++++++++- kube/services/fence/fence-deploy.yaml | 9 ++++++++- kube/services/fenceshib/fenceshib-deploy.yaml | 9 ++++++++- .../frontend-framework/frontend-framework-deploy.yaml | 9 ++++++++- .../frontend-framework-root-deploy.yaml | 9 
++++++++- kube/services/gdcapi/gdcapi-deploy.yaml | 9 ++++++++- .../google-sa-validation-deploy.yaml | 9 ++++++++- kube/services/guppy/guppy-deploy.yaml | 9 ++++++++- kube/services/hatchery/hatchery-deploy.yaml | 9 ++++++++- kube/services/indexd/indexd-canary-deploy.yaml | 9 ++++++++- kube/services/indexd/indexd-deploy.yaml | 9 ++++++++- kube/services/influxdb/influxdb-deployment.yaml | 9 ++++++++- .../jenkins-ci-worker/jenkins-ci-worker-deploy.yaml | 7 ++++++- kube/services/jenkins-worker/jenkins-worker-deploy.yaml | 7 ++++++- kube/services/jenkins/jenkins-deploy.yaml | 5 +++++ .../services/jenkins2-worker/jenkins2-worker-deploy.yaml | 7 ++++++- kube/services/jenkins2/jenkins2-deploy.yaml | 7 ++++++- .../jobs/arborist-rm-expired-access-cronjob.yaml | 9 ++++++++- kube/services/jobs/arborist-rm-expired-access-job.yaml | 9 ++++++++- kube/services/jobs/arboristdb-create-job.yaml | 9 ++++++++- kube/services/jobs/aws-bucket-replicate-job.yaml | 9 ++++++++- kube/services/jobs/bucket-manifest-job.yaml | 9 ++++++++- kube/services/jobs/bucket-replicate-job.yaml | 9 ++++++++- kube/services/jobs/bucket-replication-job.yaml | 9 ++++++++- kube/services/jobs/bucket-size-report-job.yaml | 9 ++++++++- kube/services/jobs/cedar-ingestion-job.yaml | 9 ++++++++- kube/services/jobs/client-modify-job.yaml | 9 ++++++++- kube/services/jobs/cogwheel-register-client-job.yaml | 9 ++++++++- kube/services/jobs/config-fence-job.yaml | 9 ++++++++- kube/services/jobs/covid19-bayes-cronjob.yaml | 9 ++++++++- kube/services/jobs/covid19-bayes-job.yaml | 9 ++++++++- kube/services/jobs/covid19-etl-job.yaml | 9 ++++++++- kube/services/jobs/covid19-notebook-etl-job.yaml | 9 ++++++++- kube/services/jobs/data-ingestion-job.yaml | 9 ++++++++- kube/services/jobs/distribute-licenses-job.yaml | 9 ++++++++- kube/services/jobs/envtest-job.yaml | 9 ++++++++- kube/services/jobs/es-garbage-job.yaml | 9 ++++++++- kube/services/jobs/etl-cronjob.yaml | 9 ++++++++- kube/services/jobs/etl-job.yaml | 9 ++++++++- 
.../jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml | 9 ++++++++- .../jobs/fence-cleanup-expired-ga4gh-info-job.yaml | 9 ++++++++- kube/services/jobs/fence-db-migrate-job.yaml | 9 ++++++++- kube/services/jobs/fence-delete-expired-clients-job.yaml | 7 +++++++ kube/services/jobs/fence-visa-update-cronjob.yaml | 9 ++++++++- kube/services/jobs/fence-visa-update-job.yaml | 9 ++++++++- kube/services/jobs/fencedb-create-job.yaml | 9 ++++++++- kube/services/jobs/fluentd-restart-job.yaml | 9 ++++++++- kube/services/jobs/gdcdb-create-job.yaml | 9 ++++++++- kube/services/jobs/gen3qa-check-bucket-access-job.yaml | 9 ++++++++- kube/services/jobs/gentestdata-job.yaml | 9 ++++++++- kube/services/jobs/gitops-sync-job.yaml | 9 ++++++++- kube/services/jobs/google-bucket-manifest-job.yaml | 9 ++++++++- kube/services/jobs/google-bucket-replicate-job.yaml | 9 ++++++++- kube/services/jobs/google-create-bucket-job.yaml | 9 ++++++++- .../jobs/google-delete-expired-access-cronjob.yaml | 9 ++++++++- kube/services/jobs/google-delete-expired-access-job.yaml | 9 ++++++++- .../google-delete-expired-service-account-cronjob.yaml | 9 ++++++++- .../jobs/google-delete-expired-service-account-job.yaml | 9 ++++++++- kube/services/jobs/google-init-proxy-groups-cronjob.yaml | 9 ++++++++- kube/services/jobs/google-init-proxy-groups-job.yaml | 9 ++++++++- .../jobs/google-manage-account-access-cronjob.yaml | 9 ++++++++- kube/services/jobs/google-manage-account-access-job.yaml | 9 ++++++++- kube/services/jobs/google-manage-keys-cronjob.yaml | 9 ++++++++- kube/services/jobs/google-manage-keys-job.yaml | 9 ++++++++- .../jobs/google-verify-bucket-access-group-cronjob.yaml | 9 ++++++++- .../jobs/google-verify-bucket-access-group-job.yaml | 9 ++++++++- kube/services/jobs/graph-create-job.yaml | 9 ++++++++- kube/services/jobs/hatchery-metrics-job.yaml | 9 ++++++++- kube/services/jobs/hatchery-reaper-job.yaml | 9 ++++++++- kube/services/jobs/healthcheck-cronjob.yaml | 9 ++++++++- 
kube/services/jobs/indexd-authz-job.yaml | 9 ++++++++- kube/services/jobs/indexd-userdb-job.yaml | 9 ++++++++- kube/services/jobs/metadata-aggregate-sync-job.yaml | 9 ++++++++- kube/services/jobs/opencost-report-argo-job.yaml | 7 +++++++ kube/services/jobs/psql-fix-job.yaml | 9 ++++++++- kube/services/jobs/remove-objects-from-clouds-job.yaml | 9 ++++++++- kube/services/jobs/replicate-validation-job.yaml | 9 ++++++++- kube/services/jobs/s3sync-cronjob.yaml | 9 ++++++++- kube/services/jobs/usersync-job.yaml | 9 ++++++++- kube/services/jobs/useryaml-job.yaml | 9 ++++++++- kube/services/jupyterhub/jupyterhub-deploy.yaml | 9 ++++++++- kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml | 9 ++++++++- .../services/manifestservice/manifestservice-deploy.yaml | 9 ++++++++- kube/services/mariner/mariner-deploy.yaml | 9 ++++++++- kube/services/metadata/metadata-deploy.yaml | 9 ++++++++- kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml | 9 ++++++++- kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 9 ++++++++- kube/services/peregrine/peregrine-canary-deploy.yaml | 9 ++++++++- kube/services/peregrine/peregrine-deploy.yaml | 9 ++++++++- kube/services/pidgin/pidgin-deploy.yaml | 9 ++++++++- kube/services/portal/portal-deploy.yaml | 9 ++++++++- kube/services/portal/portal-root-deploy.yaml | 7 +++++++ .../presigned-url-fence/presigned-url-fence-deploy.yaml | 9 ++++++++- kube/services/qa-dashboard/qa-dashboard-deployment.yaml | 9 ++++++++- kube/services/qabot/qabot-deploy.yaml | 9 ++++++++- kube/services/requestor/requestor-deploy.yaml | 9 ++++++++- kube/services/revproxy/revproxy-deploy.yaml | 9 ++++++++- kube/services/selenium/selenium-hub-deployment.yaml | 9 ++++++++- .../selenium/selenium-node-chrome-deployment.yaml | 9 ++++++++- kube/services/sftp/sftp-deploy.yaml | 9 ++++++++- kube/services/sheepdog/sheepdog-canary-deploy.yaml | 9 ++++++++- kube/services/sheepdog/sheepdog-deploy.yaml | 9 ++++++++- kube/services/shiny/shiny-deploy.yaml | 9 ++++++++- 
kube/services/sower/sower-deploy.yaml | 9 ++++++++- kube/services/spark/spark-deploy.yaml | 9 ++++++++- kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml | 9 ++++++++- .../services/statsd-exporter/statsd-exporter-deploy.yaml | 9 ++++++++- kube/services/status-api/status-api-deploy.yaml | 9 ++++++++- kube/services/superset/superset-deploy.yaml | 9 ++++++++- kube/services/thor/thor-deploy.yaml | 9 ++++++++- kube/services/tty/tty-deploy.yaml | 9 ++++++++- kube/services/tube/tube-deploy.yaml | 9 ++++++++- kube/services/ws-storage/ws-storage-deploy.yaml | 9 ++++++++- kube/services/wts/wts-deploy.yaml | 9 ++++++++- 134 files changed, 1051 insertions(+), 130 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 2b143a495..9890c38e6 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-02-03T22:39:02Z", + "generated_at": "2023-02-09T21:25:40Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -1139,7 +1139,7 @@ "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", "is_secret": false, "is_verified": false, - "line_number": 80, + "line_number": 87, "type": "Basic Auth Credentials" } ], diff --git a/kube/services/access-backend/access-backend-deploy.yaml b/kube/services/access-backend/access-backend-deploy.yaml index e12a954f7..9f46176d5 100644 --- a/kube/services/access-backend/access-backend-deploy.yaml +++ b/kube/services/access-backend/access-backend-deploy.yaml @@ -40,12 +40,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml 
b/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml index 2a5ce95ff..c7a49dfd7 100644 --- a/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml +++ b/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml @@ -31,12 +31,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT serviceAccountName: ambassador containers: - name: ambassador diff --git a/kube/services/ambassador/ambassador-deploy.yaml b/kube/services/ambassador/ambassador-deploy.yaml index cd3f52cf1..073f032ac 100644 --- a/kube/services/ambassador/ambassador-deploy.yaml +++ b/kube/services/ambassador/ambassador-deploy.yaml @@ -37,12 +37,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT serviceAccountName: ambassador containers: - name: ambassador diff --git a/kube/services/ambtest/ambtest-deploy.yaml b/kube/services/ambtest/ambtest-deploy.yaml index 00247f7ea..61420ea5d 100644 --- a/kube/services/ambtest/ambtest-deploy.yaml +++ b/kube/services/ambtest/ambtest-deploy.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: ambtest-conf diff --git a/kube/services/arborist/arborist-deploy-2.yaml b/kube/services/arborist/arborist-deploy-2.yaml index 
8949b60bb..fbd017caa 100644 --- a/kube/services/arborist/arborist-deploy-2.yaml +++ b/kube/services/arborist/arborist-deploy-2.yaml @@ -39,12 +39,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: arborist-secret diff --git a/kube/services/arborist/arborist-deploy.yaml b/kube/services/arborist/arborist-deploy.yaml index 11a7dcda9..5deef6ac7 100644 --- a/kube/services/arborist/arborist-deploy.yaml +++ b/kube/services/arborist/arborist-deploy.yaml @@ -40,12 +40,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: # ----------------------------------------------------------------------------- diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index 9c661e348..67acf0ca7 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -40,12 +40,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: argo-config configMap: diff --git a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml 
b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml index c0dae7370..8707a79d5 100644 --- a/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml +++ b/kube/services/arranger-dashboard/arranger-dashboard-deploy.yaml @@ -34,12 +34,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false containers: - name: arranger-dashboard diff --git a/kube/services/arranger/arranger-deploy.yaml b/kube/services/arranger/arranger-deploy.yaml index 7620ce536..31d715d7c 100644 --- a/kube/services/arranger/arranger-deploy.yaml +++ b/kube/services/arranger/arranger-deploy.yaml @@ -35,12 +35,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: arranger-config diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml index 97fe415c8..be31e7a4c 100644 --- a/kube/services/audit-service/audit-service-deploy.yaml +++ b/kube/services/audit-service/audit-service-deploy.yaml @@ -44,12 +44,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: config-volume diff --git 
a/kube/services/auspice/auspice-deploy.yaml b/kube/services/auspice/auspice-deploy.yaml index 6e4b371b7..63eeba922 100644 --- a/kube/services/auspice/auspice-deploy.yaml +++ b/kube/services/auspice/auspice-deploy.yaml @@ -35,12 +35,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false containers: - name: auspice diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml index 4534c480e..59690b375 100644 --- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml +++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml @@ -26,6 +26,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml index 8f4b88311..e7f89f8b8 100644 --- a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml +++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml @@ -36,12 +36,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/cogwheel/cogwheel-deploy.yaml b/kube/services/cogwheel/cogwheel-deploy.yaml index 0e857015f..c66f4d3b3 100644 --- 
a/kube/services/cogwheel/cogwheel-deploy.yaml +++ b/kube/services/cogwheel/cogwheel-deploy.yaml @@ -16,12 +16,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: cogwheel-g3auto secret: diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index 96ff1b73b..cb2634424 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -42,12 +42,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: cohort-middleware-config diff --git a/kube/services/dashboard/dashboard-deploy.yaml b/kube/services/dashboard/dashboard-deploy.yaml index e03766304..ebbbdfa11 100644 --- a/kube/services/dashboard/dashboard-deploy.yaml +++ b/kube/services/dashboard/dashboard-deploy.yaml @@ -41,12 +41,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/datasim/datasim-deploy.yaml b/kube/services/datasim/datasim-deploy.yaml index a0e33149f..0f6f21d68 100644 --- 
a/kube/services/datasim/datasim-deploy.yaml +++ b/kube/services/datasim/datasim-deploy.yaml @@ -34,12 +34,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/dicom-server/dicom-server-deploy.yaml b/kube/services/dicom-server/dicom-server-deploy.yaml index 854cda23b..43bd90e5d 100644 --- a/kube/services/dicom-server/dicom-server-deploy.yaml +++ b/kube/services/dicom-server/dicom-server-deploy.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT volumes: - name: config-volume-g3auto secret: diff --git a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml index ed2af0fec..9df6fbc93 100644 --- a/kube/services/dicom-viewer/dicom-viewer-deploy.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-deploy.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT containers: - name: dicom-viewer GEN3_DICOM-VIEWER_IMAGE diff --git a/kube/services/fence/fence-canary-deploy.yaml b/kube/services/fence/fence-canary-deploy.yaml index 7c41c1f0f..513a1a998 100644 --- a/kube/services/fence/fence-canary-deploy.yaml +++ 
b/kube/services/fence/fence-canary-deploy.yaml @@ -41,12 +41,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 72b2b9572..1722676e0 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -47,12 +47,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: # ----------------------------------------------------------------------------- diff --git a/kube/services/fenceshib/fenceshib-deploy.yaml b/kube/services/fenceshib/fenceshib-deploy.yaml index 469d7eb55..0b74bd767 100644 --- a/kube/services/fenceshib/fenceshib-deploy.yaml +++ b/kube/services/fenceshib/fenceshib-deploy.yaml @@ -42,12 +42,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/frontend-framework/frontend-framework-deploy.yaml b/kube/services/frontend-framework/frontend-framework-deploy.yaml index 743d4736c..3a36bfe7a 100644 --- 
a/kube/services/frontend-framework/frontend-framework-deploy.yaml +++ b/kube/services/frontend-framework/frontend-framework-deploy.yaml @@ -34,12 +34,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml index f5766555c..8cad981c8 100644 --- a/kube/services/frontend-framework/frontend-framework-root-deploy.yaml +++ b/kube/services/frontend-framework/frontend-framework-root-deploy.yaml @@ -34,12 +34,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/gdcapi/gdcapi-deploy.yaml b/kube/services/gdcapi/gdcapi-deploy.yaml index 261b48994..5967663f0 100644 --- a/kube/services/gdcapi/gdcapi-deploy.yaml +++ b/kube/services/gdcapi/gdcapi-deploy.yaml @@ -18,12 +18,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml 
b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml index aa120b2e8..b35fda845 100644 --- a/kube/services/google-sa-validation/google-sa-validation-deploy.yaml +++ b/kube/services/google-sa-validation/google-sa-validation-deploy.yaml @@ -24,12 +24,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 666be88a1..55cd17e41 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -39,12 +39,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: guppy-config diff --git a/kube/services/hatchery/hatchery-deploy.yaml b/kube/services/hatchery/hatchery-deploy.yaml index 3bb4ec0a8..f7de81d79 100644 --- a/kube/services/hatchery/hatchery-deploy.yaml +++ b/kube/services/hatchery/hatchery-deploy.yaml @@ -40,12 +40,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: hatchery-service-account securityContext: fsGroup: 1001 diff --git a/kube/services/indexd/indexd-canary-deploy.yaml 
b/kube/services/indexd/indexd-canary-deploy.yaml index 4ae860da0..7e17ba9af 100644 --- a/kube/services/indexd/indexd-canary-deploy.yaml +++ b/kube/services/indexd/indexd-canary-deploy.yaml @@ -39,12 +39,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml index afce6a3b5..5ef123b19 100644 --- a/kube/services/indexd/indexd-deploy.yaml +++ b/kube/services/indexd/indexd-deploy.yaml @@ -43,12 +43,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/influxdb/influxdb-deployment.yaml b/kube/services/influxdb/influxdb-deployment.yaml index 36bdbe576..3279e3c55 100644 --- a/kube/services/influxdb/influxdb-deployment.yaml +++ b/kube/services/influxdb/influxdb-deployment.yaml @@ -19,12 +19,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND containers: - image: docker.io/influxdb:1.8.0 imagePullPolicy: IfNotPresent diff --git a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml 
b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml index 630fc5837..9184cd336 100644 --- a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml +++ b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml @@ -24,7 +24,12 @@ spec: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml index 7cb169649..bb0775df2 100644 --- a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml +++ b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml @@ -24,7 +24,12 @@ spec: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml index 5e2f8c154..89be7ec5b 100644 --- a/kube/services/jenkins/jenkins-deploy.yaml +++ b/kube/services/jenkins/jenkins-deploy.yaml @@ -33,6 +33,11 @@ spec: operator: In values: - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml index c85efcff3..5646e8bc2 100644 --- a/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml +++ b/kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml @@ -24,7 +24,12 @@ spec: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In 
+ values: + - on-demand serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml index a3c5b2f88..ee838bae6 100644 --- a/kube/services/jenkins2/jenkins2-deploy.yaml +++ b/kube/services/jenkins2/jenkins2-deploy.yaml @@ -32,7 +32,12 @@ spec: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand serviceAccountName: jenkins-service securityContext: runAsUser: 1000 diff --git a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml index f99bd4d1c..29603d27f 100644 --- a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml +++ b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml @@ -18,12 +18,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: arborist-secret secret: diff --git a/kube/services/jobs/arborist-rm-expired-access-job.yaml b/kube/services/jobs/arborist-rm-expired-access-job.yaml index bc9625ccc..6985906d0 100644 --- a/kube/services/jobs/arborist-rm-expired-access-job.yaml +++ b/kube/services/jobs/arborist-rm-expired-access-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false dnsConfig: options: diff --git 
a/kube/services/jobs/arboristdb-create-job.yaml b/kube/services/jobs/arboristdb-create-job.yaml index d96af6613..7898a0c91 100644 --- a/kube/services/jobs/arboristdb-create-job.yaml +++ b/kube/services/jobs/arboristdb-create-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: arborist-secret diff --git a/kube/services/jobs/aws-bucket-replicate-job.yaml b/kube/services/jobs/aws-bucket-replicate-job.yaml index 4bc15a294..d3893d2bb 100644 --- a/kube/services/jobs/aws-bucket-replicate-job.yaml +++ b/kube/services/jobs/aws-bucket-replicate-job.yaml @@ -14,12 +14,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/bucket-manifest-job.yaml b/kube/services/jobs/bucket-manifest-job.yaml index 24f42b76f..9cfbe054b 100644 --- a/kube/services/jobs/bucket-manifest-job.yaml +++ b/kube/services/jobs/bucket-manifest-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: sa-#SA_NAME_PLACEHOLDER# volumes: - name: cred-volume diff --git a/kube/services/jobs/bucket-replicate-job.yaml 
b/kube/services/jobs/bucket-replicate-job.yaml index 46a8be51f..0f7ae9260 100644 --- a/kube/services/jobs/bucket-replicate-job.yaml +++ b/kube/services/jobs/bucket-replicate-job.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: batch-operations-account securityContext: fsGroup: 1000 diff --git a/kube/services/jobs/bucket-replication-job.yaml b/kube/services/jobs/bucket-replication-job.yaml index 86a569c94..c8e541d9e 100644 --- a/kube/services/jobs/bucket-replication-job.yaml +++ b/kube/services/jobs/bucket-replication-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: sa-#SA_NAME_PLACEHOLDER# volumes: - name: cred-volume diff --git a/kube/services/jobs/bucket-size-report-job.yaml b/kube/services/jobs/bucket-size-report-job.yaml index 9a9d0f958..89d927f15 100644 --- a/kube/services/jobs/bucket-size-report-job.yaml +++ b/kube/services/jobs/bucket-size-report-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never securityContext: fsGroup: 1000 diff --git a/kube/services/jobs/cedar-ingestion-job.yaml 
b/kube/services/jobs/cedar-ingestion-job.yaml index a43b2937d..ecc83335c 100644 --- a/kube/services/jobs/cedar-ingestion-job.yaml +++ b/kube/services/jobs/cedar-ingestion-job.yaml @@ -29,12 +29,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/client-modify-job.yaml b/kube/services/jobs/client-modify-job.yaml index 4e86709f0..5726092be 100644 --- a/kube/services/jobs/client-modify-job.yaml +++ b/kube/services/jobs/client-modify-job.yaml @@ -15,12 +15,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: yaml-merge configMap: diff --git a/kube/services/jobs/cogwheel-register-client-job.yaml b/kube/services/jobs/cogwheel-register-client-job.yaml index 81c6ff487..1bdbf906d 100644 --- a/kube/services/jobs/cogwheel-register-client-job.yaml +++ b/kube/services/jobs/cogwheel-register-client-job.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: cogwheel-g3auto secret: diff --git a/kube/services/jobs/config-fence-job.yaml b/kube/services/jobs/config-fence-job.yaml index 62ec47053..38be19d61 100644 --- 
a/kube/services/jobs/config-fence-job.yaml +++ b/kube/services/jobs/config-fence-job.yaml @@ -22,12 +22,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: shared-data diff --git a/kube/services/jobs/covid19-bayes-cronjob.yaml b/kube/services/jobs/covid19-bayes-cronjob.yaml index 53d92b9ca..733c17cf7 100644 --- a/kube/services/jobs/covid19-bayes-cronjob.yaml +++ b/kube/services/jobs/covid19-bayes-cronjob.yaml @@ -20,12 +20,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 restartPolicy: Never nodeSelector: diff --git a/kube/services/jobs/covid19-bayes-job.yaml b/kube/services/jobs/covid19-bayes-job.yaml index 36853a8d6..0afc186b9 100644 --- a/kube/services/jobs/covid19-bayes-job.yaml +++ b/kube/services/jobs/covid19-bayes-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 restartPolicy: Never containers: diff --git a/kube/services/jobs/covid19-etl-job.yaml b/kube/services/jobs/covid19-etl-job.yaml index 84ab52a4e..dd2f6571f 100644 --- 
a/kube/services/jobs/covid19-etl-job.yaml +++ b/kube/services/jobs/covid19-etl-job.yaml @@ -14,12 +14,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 volumes: - name: cred-volume diff --git a/kube/services/jobs/covid19-notebook-etl-job.yaml b/kube/services/jobs/covid19-notebook-etl-job.yaml index e5045036b..e482c0505 100644 --- a/kube/services/jobs/covid19-notebook-etl-job.yaml +++ b/kube/services/jobs/covid19-notebook-etl-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: s3-access-opencdn-databucket-gen3 volumes: - name: cred-volume diff --git a/kube/services/jobs/data-ingestion-job.yaml b/kube/services/jobs/data-ingestion-job.yaml index 940e1ff08..797b18912 100644 --- a/kube/services/jobs/data-ingestion-job.yaml +++ b/kube/services/jobs/data-ingestion-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never volumes: - name: shared-data diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml index 02a5b08ed..8c276f194 100644 --- 
a/kube/services/jobs/distribute-licenses-job.yaml +++ b/kube/services/jobs/distribute-licenses-job.yaml @@ -23,12 +23,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never serviceAccountName: hatchery-service-account containers: diff --git a/kube/services/jobs/envtest-job.yaml b/kube/services/jobs/envtest-job.yaml index 50923579c..382b725ff 100644 --- a/kube/services/jobs/envtest-job.yaml +++ b/kube/services/jobs/envtest-job.yaml @@ -14,12 +14,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT restartPolicy: Never automountServiceAccountToken: false containers: diff --git a/kube/services/jobs/es-garbage-job.yaml b/kube/services/jobs/es-garbage-job.yaml index 3583d1217..9d5dcf33f 100644 --- a/kube/services/jobs/es-garbage-job.yaml +++ b/kube/services/jobs/es-garbage-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never serviceAccountName: gitops-sa securityContext: diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml index 2b2a00304..c68fc9fd8 100644 --- a/kube/services/jobs/etl-cronjob.yaml +++ b/kube/services/jobs/etl-cronjob.yaml @@ -19,12 +19,19 @@ spec: nodeAffinity: 
preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: creds-volume secret: diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml index 43761b7f8..fa201c99a 100644 --- a/kube/services/jobs/etl-job.yaml +++ b/kube/services/jobs/etl-job.yaml @@ -14,12 +14,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: creds-volume secret: diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml index 20358c6a6..74d7fc9a4 100644 --- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml +++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml @@ -20,12 +20,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml index f464b690b..afeaebf72 100644 --- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml +++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: 
preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-db-migrate-job.yaml b/kube/services/jobs/fence-db-migrate-job.yaml index e954ba116..298d61a5d 100644 --- a/kube/services/jobs/fence-db-migrate-job.yaml +++ b/kube/services/jobs/fence-db-migrate-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-delete-expired-clients-job.yaml b/kube/services/jobs/fence-delete-expired-clients-job.yaml index 041b5c2b7..9252f6828 100644 --- a/kube/services/jobs/fence-delete-expired-clients-job.yaml +++ b/kube/services/jobs/fence-delete-expired-clients-job.yaml @@ -15,6 +15,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml index a33b7f2a6..9e8628b26 100644 --- a/kube/services/jobs/fence-visa-update-cronjob.yaml +++ b/kube/services/jobs/fence-visa-update-cronjob.yaml @@ -19,12 +19,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: 
karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fence-visa-update-job.yaml b/kube/services/jobs/fence-visa-update-job.yaml index b5b125c7a..45342c0d0 100644 --- a/kube/services/jobs/fence-visa-update-job.yaml +++ b/kube/services/jobs/fence-visa-update-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/fencedb-create-job.yaml b/kube/services/jobs/fencedb-create-job.yaml index 71789f257..a99c7aca3 100644 --- a/kube/services/jobs/fencedb-create-job.yaml +++ b/kube/services/jobs/fencedb-create-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/jobs/fluentd-restart-job.yaml b/kube/services/jobs/fluentd-restart-job.yaml index 1cdf6e2ec..e843d9c68 100644 --- a/kube/services/jobs/fluentd-restart-job.yaml +++ b/kube/services/jobs/fluentd-restart-job.yaml @@ -14,12 +14,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - 
weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never serviceAccountName: fluentd-restart containers: diff --git a/kube/services/jobs/gdcdb-create-job.yaml b/kube/services/jobs/gdcdb-create-job.yaml index 14234707a..1668429ad 100644 --- a/kube/services/jobs/gdcdb-create-job.yaml +++ b/kube/services/jobs/gdcdb-create-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml index 843b3e3d5..87ebc56be 100644 --- a/kube/services/jobs/gen3qa-check-bucket-access-job.yaml +++ b/kube/services/jobs/gen3qa-check-bucket-access-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND containers: - name: gen3qa-check-bucket-access GEN3_GEN3_QA_CONTROLLER_IMAGE|-image: quay.io/cdis/gen3-qa-controller:fix_gen3qa_get_check-| diff --git a/kube/services/jobs/gentestdata-job.yaml b/kube/services/jobs/gentestdata-job.yaml index 78e382f44..db2fcd82d 100644 --- a/kube/services/jobs/gentestdata-job.yaml +++ b/kube/services/jobs/gentestdata-job.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In 
+ values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/gitops-sync-job.yaml b/kube/services/jobs/gitops-sync-job.yaml index a81fd0d6e..664bdf4c1 100644 --- a/kube/services/jobs/gitops-sync-job.yaml +++ b/kube/services/jobs/gitops-sync-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never serviceAccountName: gitops-sa securityContext: diff --git a/kube/services/jobs/google-bucket-manifest-job.yaml b/kube/services/jobs/google-bucket-manifest-job.yaml index 38ed105a3..619c1c03e 100644 --- a/kube/services/jobs/google-bucket-manifest-job.yaml +++ b/kube/services/jobs/google-bucket-manifest-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/google-bucket-replicate-job.yaml b/kube/services/jobs/google-bucket-replicate-job.yaml index bc6263a26..7e9b2e0a7 100644 --- a/kube/services/jobs/google-bucket-replicate-job.yaml +++ b/kube/services/jobs/google-bucket-replicate-job.yaml @@ -16,12 +16,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: 
matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/google-create-bucket-job.yaml b/kube/services/jobs/google-create-bucket-job.yaml index 4bc2b41c1..6e3f248a7 100644 --- a/kube/services/jobs/google-create-bucket-job.yaml +++ b/kube/services/jobs/google-create-bucket-job.yaml @@ -51,12 +51,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-access-cronjob.yaml b/kube/services/jobs/google-delete-expired-access-cronjob.yaml index 7132f0379..ce485cce3 100644 --- a/kube/services/jobs/google-delete-expired-access-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-access-cronjob.yaml @@ -20,12 +20,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-access-job.yaml b/kube/services/jobs/google-delete-expired-access-job.yaml index 901e0cab2..c50272254 100644 --- a/kube/services/jobs/google-delete-expired-access-job.yaml +++ b/kube/services/jobs/google-delete-expired-access-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - 
on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml index 2106fc9d7..eb102f5bf 100644 --- a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-delete-expired-service-account-job.yaml b/kube/services/jobs/google-delete-expired-service-account-job.yaml index 8da478ea4..04c19f9e7 100644 --- a/kube/services/jobs/google-delete-expired-service-account-job.yaml +++ b/kube/services/jobs/google-delete-expired-service-account-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml index 7571e7f12..499d6cabd 100644 --- a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml +++ b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml @@ -21,12 +21,19 @@ spec: 
nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-init-proxy-groups-job.yaml b/kube/services/jobs/google-init-proxy-groups-job.yaml index 0b57da66c..3fa0eb63d 100644 --- a/kube/services/jobs/google-init-proxy-groups-job.yaml +++ b/kube/services/jobs/google-init-proxy-groups-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-account-access-cronjob.yaml b/kube/services/jobs/google-manage-account-access-cronjob.yaml index 0e5e16d44..4e796cea0 100644 --- a/kube/services/jobs/google-manage-account-access-cronjob.yaml +++ b/kube/services/jobs/google-manage-account-access-cronjob.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-account-access-job.yaml b/kube/services/jobs/google-manage-account-access-job.yaml index 624259d4a..d7f6204a0 100644 --- a/kube/services/jobs/google-manage-account-access-job.yaml +++ 
b/kube/services/jobs/google-manage-account-access-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-keys-cronjob.yaml b/kube/services/jobs/google-manage-keys-cronjob.yaml index 7de185099..ea0bcc45f 100644 --- a/kube/services/jobs/google-manage-keys-cronjob.yaml +++ b/kube/services/jobs/google-manage-keys-cronjob.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-manage-keys-job.yaml b/kube/services/jobs/google-manage-keys-job.yaml index a7454b73b..84c855fb6 100644 --- a/kube/services/jobs/google-manage-keys-job.yaml +++ b/kube/services/jobs/google-manage-keys-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml index 26b290202..57981d813 100644 --- 
a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml +++ b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/google-verify-bucket-access-group-job.yaml b/kube/services/jobs/google-verify-bucket-access-group-job.yaml index e387ffd59..93eae91dc 100644 --- a/kube/services/jobs/google-verify-bucket-access-group-job.yaml +++ b/kube/services/jobs/google-verify-bucket-access-group-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/graph-create-job.yaml b/kube/services/jobs/graph-create-job.yaml index f1f454e26..f6595cdd2 100644 --- a/kube/services/jobs/graph-create-job.yaml +++ b/kube/services/jobs/graph-create-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/jobs/hatchery-metrics-job.yaml b/kube/services/jobs/hatchery-metrics-job.yaml index 
6dece59a3..26f5ad973 100644 --- a/kube/services/jobs/hatchery-metrics-job.yaml +++ b/kube/services/jobs/hatchery-metrics-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never serviceAccountName: hatchery-service-account securityContext: diff --git a/kube/services/jobs/hatchery-reaper-job.yaml b/kube/services/jobs/hatchery-reaper-job.yaml index 58a65b573..f2b5411fb 100644 --- a/kube/services/jobs/hatchery-reaper-job.yaml +++ b/kube/services/jobs/hatchery-reaper-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never serviceAccountName: hatchery-service-account securityContext: diff --git a/kube/services/jobs/healthcheck-cronjob.yaml b/kube/services/jobs/healthcheck-cronjob.yaml index a9a40598c..d79274bb7 100644 --- a/kube/services/jobs/healthcheck-cronjob.yaml +++ b/kube/services/jobs/healthcheck-cronjob.yaml @@ -19,12 +19,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND restartPolicy: Never serviceAccountName: jenkins-service containers: diff --git a/kube/services/jobs/indexd-authz-job.yaml b/kube/services/jobs/indexd-authz-job.yaml index 41ad4a4b8..8b041740e 100644 --- 
a/kube/services/jobs/indexd-authz-job.yaml +++ b/kube/services/jobs/indexd-authz-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/jobs/indexd-userdb-job.yaml b/kube/services/jobs/indexd-userdb-job.yaml index 57ab5677c..676307481 100644 --- a/kube/services/jobs/indexd-userdb-job.yaml +++ b/kube/services/jobs/indexd-userdb-job.yaml @@ -20,12 +20,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index d88d12295..d62ab0c77 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -12,12 +12,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: config-volume-g3auto secret: diff --git a/kube/services/jobs/opencost-report-argo-job.yaml b/kube/services/jobs/opencost-report-argo-job.yaml index 26fbbae60..788bd1dec 100644 --- a/kube/services/jobs/opencost-report-argo-job.yaml +++ 
b/kube/services/jobs/opencost-report-argo-job.yaml @@ -34,6 +34,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType diff --git a/kube/services/jobs/psql-fix-job.yaml b/kube/services/jobs/psql-fix-job.yaml index 3e93b77a6..40fa74b96 100644 --- a/kube/services/jobs/psql-fix-job.yaml +++ b/kube/services/jobs/psql-fix-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: jenkins-service containers: - name: fix diff --git a/kube/services/jobs/remove-objects-from-clouds-job.yaml b/kube/services/jobs/remove-objects-from-clouds-job.yaml index 3f1cf6f1b..b839b24e7 100644 --- a/kube/services/jobs/remove-objects-from-clouds-job.yaml +++ b/kube/services/jobs/remove-objects-from-clouds-job.yaml @@ -15,12 +15,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/replicate-validation-job.yaml b/kube/services/jobs/replicate-validation-job.yaml index 28e7bc28e..d64cfcc13 100644 --- a/kube/services/jobs/replicate-validation-job.yaml +++ b/kube/services/jobs/replicate-validation-job.yaml @@ -15,12 +15,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + 
matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: aws-cred-volume secret: diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml index 9113b4881..c0fb8196e 100644 --- a/kube/services/jobs/s3sync-cronjob.yaml +++ b/kube/services/jobs/s3sync-cronjob.yaml @@ -25,12 +25,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND volumes: - name: cred-volume secret: diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml index aa0718260..058f49bf6 100644 --- a/kube/services/jobs/usersync-job.yaml +++ b/kube/services/jobs/usersync-job.yaml @@ -35,12 +35,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: useryaml-job volumes: - name: yaml-merge diff --git a/kube/services/jobs/useryaml-job.yaml b/kube/services/jobs/useryaml-job.yaml index 49cff4854..5853a05c4 100644 --- a/kube/services/jobs/useryaml-job.yaml +++ b/kube/services/jobs/useryaml-job.yaml @@ -13,12 +13,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - 
- ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/jupyterhub/jupyterhub-deploy.yaml b/kube/services/jupyterhub/jupyterhub-deploy.yaml index 293d1169e..38b2cd41d 100644 --- a/kube/services/jupyterhub/jupyterhub-deploy.yaml +++ b/kube/services/jupyterhub/jupyterhub-deploy.yaml @@ -22,12 +22,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: jupyter-service volumes: - name: config-volume diff --git a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml index a2d0c41f0..9805a8e38 100644 --- a/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml +++ b/kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml @@ -36,12 +36,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/manifestservice/manifestservice-deploy.yaml b/kube/services/manifestservice/manifestservice-deploy.yaml index 3db33dd7d..8cb285f28 100644 --- a/kube/services/manifestservice/manifestservice-deploy.yaml +++ b/kube/services/manifestservice/manifestservice-deploy.yaml @@ -39,12 +39,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: 
eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/mariner/mariner-deploy.yaml b/kube/services/mariner/mariner-deploy.yaml index c151013b5..ec4b8a0d4 100644 --- a/kube/services/mariner/mariner-deploy.yaml +++ b/kube/services/mariner/mariner-deploy.yaml @@ -49,12 +49,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: true containers: - name: mariner diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml index ca8f268b7..c520577a2 100644 --- a/kube/services/metadata/metadata-deploy.yaml +++ b/kube/services/metadata/metadata-deploy.yaml @@ -41,12 +41,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume-g3auto diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml index b96b100e2..1eef4f92d 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml @@ -35,12 +35,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - 
ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: ohdsi-atlas-config-local diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 3caf4bb9e..175761c78 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: ohdsi-webapi-reverse-proxy-config diff --git a/kube/services/peregrine/peregrine-canary-deploy.yaml b/kube/services/peregrine/peregrine-canary-deploy.yaml index ce5177ddd..4fffc1557 100644 --- a/kube/services/peregrine/peregrine-canary-deploy.yaml +++ b/kube/services/peregrine/peregrine-canary-deploy.yaml @@ -39,12 +39,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: shared-data diff --git a/kube/services/peregrine/peregrine-deploy.yaml b/kube/services/peregrine/peregrine-deploy.yaml index 1c84be131..3bef06d87 100644 --- a/kube/services/peregrine/peregrine-deploy.yaml +++ b/kube/services/peregrine/peregrine-deploy.yaml @@ -45,12 +45,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType 
operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: shared-data diff --git a/kube/services/pidgin/pidgin-deploy.yaml b/kube/services/pidgin/pidgin-deploy.yaml index f50cf167e..eef4856c1 100644 --- a/kube/services/pidgin/pidgin-deploy.yaml +++ b/kube/services/pidgin/pidgin-deploy.yaml @@ -39,12 +39,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: cert-volume diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index 408c826ab..9ad6e1c6e 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -35,12 +35,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: ca-volume diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml index 867133b9e..948744576 100644 --- a/kube/services/portal/portal-root-deploy.yaml +++ b/kube/services/portal/portal-root-deploy.yaml @@ -35,6 +35,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml 
b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml index 457452490..44e951f26 100644 --- a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml +++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml @@ -47,12 +47,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: yaml-merge diff --git a/kube/services/qa-dashboard/qa-dashboard-deployment.yaml b/kube/services/qa-dashboard/qa-dashboard-deployment.yaml index 3bbd17b99..c701cd90d 100644 --- a/kube/services/qa-dashboard/qa-dashboard-deployment.yaml +++ b/kube/services/qa-dashboard/qa-dashboard-deployment.yaml @@ -23,12 +23,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT containers: - name: qa-metrics image: "quay.io/cdis/qa-metrics:latest" diff --git a/kube/services/qabot/qabot-deploy.yaml b/kube/services/qabot/qabot-deploy.yaml index c2f9e208c..c788180c3 100644 --- a/kube/services/qabot/qabot-deploy.yaml +++ b/kube/services/qabot/qabot-deploy.yaml @@ -23,12 +23,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND containers: - name: qabot image: "quay.io/cdis/qa-bot:latest" diff --git a/kube/services/requestor/requestor-deploy.yaml 
b/kube/services/requestor/requestor-deploy.yaml index 3561019e9..1485190f9 100644 --- a/kube/services/requestor/requestor-deploy.yaml +++ b/kube/services/requestor/requestor-deploy.yaml @@ -41,12 +41,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index d8cfe9f41..ad00e225c 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: revproxy-conf diff --git a/kube/services/selenium/selenium-hub-deployment.yaml b/kube/services/selenium/selenium-hub-deployment.yaml index 5c1ba3aa1..14f83fe48 100644 --- a/kube/services/selenium/selenium-hub-deployment.yaml +++ b/kube/services/selenium/selenium-hub-deployment.yaml @@ -21,12 +21,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND containers: - env: - name: GRID_MAX_SESSION diff --git a/kube/services/selenium/selenium-node-chrome-deployment.yaml 
b/kube/services/selenium/selenium-node-chrome-deployment.yaml index 340f87ac1..d6b35f471 100644 --- a/kube/services/selenium/selenium-node-chrome-deployment.yaml +++ b/kube/services/selenium/selenium-node-chrome-deployment.yaml @@ -26,12 +26,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND containers: - env: - name: SE_EVENT_BUS_HOST diff --git a/kube/services/sftp/sftp-deploy.yaml b/kube/services/sftp/sftp-deploy.yaml index bbb619341..3783c7871 100644 --- a/kube/services/sftp/sftp-deploy.yaml +++ b/kube/services/sftp/sftp-deploy.yaml @@ -19,12 +19,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: - name: sftp-secret diff --git a/kube/services/sheepdog/sheepdog-canary-deploy.yaml b/kube/services/sheepdog/sheepdog-canary-deploy.yaml index 23a3c9d6a..f80f73c63 100644 --- a/kube/services/sheepdog/sheepdog-canary-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-canary-deploy.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/sheepdog/sheepdog-deploy.yaml b/kube/services/sheepdog/sheepdog-deploy.yaml index 
1b579207b..f3df8cecd 100644 --- a/kube/services/sheepdog/sheepdog-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-deploy.yaml @@ -43,12 +43,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/kube/services/shiny/shiny-deploy.yaml b/kube/services/shiny/shiny-deploy.yaml index 48d53e87f..55d795315 100644 --- a/kube/services/shiny/shiny-deploy.yaml +++ b/kube/services/shiny/shiny-deploy.yaml @@ -24,12 +24,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT volumes: - name: config-volume secret: diff --git a/kube/services/sower/sower-deploy.yaml b/kube/services/sower/sower-deploy.yaml index 0bc582552..4d7a1c93b 100644 --- a/kube/services/sower/sower-deploy.yaml +++ b/kube/services/sower/sower-deploy.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: sower-service-account volumes: - name: sower-config diff --git a/kube/services/spark/spark-deploy.yaml b/kube/services/spark/spark-deploy.yaml index da0349f41..00487eb0f 100644 --- a/kube/services/spark/spark-deploy.yaml +++ b/kube/services/spark/spark-deploy.yaml @@ -37,12 +37,19 @@ spec: nodeAffinity: 
preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false volumes: containers: diff --git a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml index 4d0b70ab8..3b7bb1de9 100644 --- a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml +++ b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml @@ -41,12 +41,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: true volumes: - name: ssjdispatcher-creds-volume diff --git a/kube/services/statsd-exporter/statsd-exporter-deploy.yaml b/kube/services/statsd-exporter/statsd-exporter-deploy.yaml index f39a167f3..9a1048f20 100644 --- a/kube/services/statsd-exporter/statsd-exporter-deploy.yaml +++ b/kube/services/statsd-exporter/statsd-exporter-deploy.yaml @@ -26,12 +26,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND containers: - name: "statsd-exporter" GEN3_STATSD-EXPORTER_IMAGE|-image: prom/statsd-exporter:v0.15.0-| diff --git a/kube/services/status-api/status-api-deploy.yaml b/kube/services/status-api/status-api-deploy.yaml index 763d06a8b..c4bf542ab 100644 --- a/kube/services/status-api/status-api-deploy.yaml +++ 
b/kube/services/status-api/status-api-deploy.yaml @@ -23,12 +23,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND containers: - name: status-api image: "quay.io/cdis/status-dashboard:latest" diff --git a/kube/services/superset/superset-deploy.yaml b/kube/services/superset/superset-deploy.yaml index 473e3c188..1312e7ea2 100644 --- a/kube/services/superset/superset-deploy.yaml +++ b/kube/services/superset/superset-deploy.yaml @@ -239,12 +239,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND securityContext: runAsUser: 0 initContainers: diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml index b531389d5..419dd561e 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -34,12 +34,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND automountServiceAccountToken: false containers: - name: thor diff --git a/kube/services/tty/tty-deploy.yaml b/kube/services/tty/tty-deploy.yaml index c8b8386a9..138ee836b 100644 --- a/kube/services/tty/tty-deploy.yaml +++ b/kube/services/tty/tty-deploy.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 
100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - ONDEMAND serviceAccountName: tty-sa securityContext: fsGroup: 1000 diff --git a/kube/services/tube/tube-deploy.yaml b/kube/services/tube/tube-deploy.yaml index dd357a92f..6c6feae0f 100644 --- a/kube/services/tube/tube-deploy.yaml +++ b/kube/services/tube/tube-deploy.yaml @@ -38,12 +38,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT automountServiceAccountToken: false volumes: - name: creds-volume diff --git a/kube/services/ws-storage/ws-storage-deploy.yaml b/kube/services/ws-storage/ws-storage-deploy.yaml index 48a03be26..f033e39a1 100644 --- a/kube/services/ws-storage/ws-storage-deploy.yaml +++ b/kube/services/ws-storage/ws-storage-deploy.yaml @@ -47,12 +47,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT volumes: - name: config-volume secret: diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index ef950921d..dd24f8808 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -45,12 +45,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: 
eks.amazonaws.com/capacityType operator: In values: - - SPOT + - SPOT terminationGracePeriodSeconds: 10 volumes: - name: wts-secret From c11f35d07d67a38c1b9fedf51598628428c2ae33 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 15 Feb 2023 10:16:23 -0600 Subject: [PATCH 074/362] Add connection pool to argo (#2154) --- kube/services/argo/values.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index bf407d651..8ada3dd17 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -6,6 +6,10 @@ controller: # -- enable persistence using postgres persistence: + connectionPool: + maxIdleConns: 100 + maxOpenConns: 0 + connMaxLifetime: 300s archive: true archiveLabelSelector: matchLabels: From c5271325a3182ca60da951ea949fd7dba3bb3505 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 15 Feb 2023 15:36:35 -0600 Subject: [PATCH 075/362] Update ingress.yaml (#2155) --- kube/services/ingress/ingress.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml index 6c9de7f56..9352005d7 100644 --- a/kube/services/ingress/ingress.yaml +++ b/kube/services/ingress/ingress.yaml @@ -9,6 +9,7 @@ metadata: alb.ingress.kubernetes.io/certificate-arn: $ARN alb.ingress.kubernetes.io/group.name: "$vpc_name" alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' + alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600 alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' spec: ingressClassName: alb @@ -22,4 +23,4 @@ spec: service: name: revproxy-service port: - number: 80 \ No newline at end of file + number: 80 From 295f0ae6359724d6ecefda9bf39894153c550f53 Mon Sep 17 00:00:00 2001 
From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 20 Feb 2023 11:54:01 -0600 Subject: [PATCH 076/362] Add additional functionality to hatchery-reaper (#1920) Co-authored-by: emalinowski --- kube/services/jobs/hatchery-reaper-job.yaml | 42 ++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) diff --git a/kube/services/jobs/hatchery-reaper-job.yaml b/kube/services/jobs/hatchery-reaper-job.yaml index f2b5411fb..ec615c0d7 100644 --- a/kube/services/jobs/hatchery-reaper-job.yaml +++ b/kube/services/jobs/hatchery-reaper-job.yaml @@ -58,7 +58,47 @@ spec: - | export GEN3_HOME="$HOME/cloud-automation" source "$GEN3_HOME/gen3/gen3setup.sh" + # 60 minute idle timeout max + limit=3600 + remote_users=$(kubectl get svc -n jupyter-pods -o json | jq -r . | jq -r '.items[].metadata.annotations."getambassador.io/config"' | yq -r .headers.remote_user) + + # helper function to construct service name + function escape() { + string="$1" + shift + safeBytes="abcdefghijklmnopqrstuvwxyz0123456789" + retString="" + while read -n 1 char ; do + if [[ $safeBytes == *"$char"* ]]; then + retString+=$char + else + hex=$(printf "%02x" "'${char}'") + retString+="-"$hex + fi + done <<< "$string" + echo $retString + } + + for user in $remote_users; do + echo $user + status=$(curl -s -H "REMOTE_USER: $user" hatchery-service/status | jq -r .status) + if [[ $status == "Running" ]]; then + echo "$user has workspace that is $status" + serviceName=h-$(escape $user)-s + service=$(kubectl get svc -n jupyter-pods $serviceName -o json | jq -r '.metadata.annotations."getambassador.io/config"' | yq -r .service) + last_activity=$(curl -s -H "REMOTE_USER: $user" $service/lw-workspace/proxy/api/status | jq -r .last_activity ) + now=$(date +%s) + delta=$(expr $now - $(date -d "$last_activity" +%s)) + echo Workspace for $user has been idle for $delta seconds + if [ "$delta" -gt "$limit" ]; then + echo "Workspace for $user has been running for $delta seconds, which is higher than 
the $limit... Terminating" + curl -XPOST -s -H "REMOTE_USER: $user" hatchery-service/terminate + fi + fi + done + + # legacy reaper code if appList="$(gen3 jupyter idle none "$(gen3 db namespace)" kill)" && [[ -n "$appList" && -n "$slackWebHook" && "$slackWebHook" != "None" ]]; then curl -X POST --data-urlencode "payload={\"text\": \"hatchery-reaper in $gen3Hostname: \n\`\`\`\n${appList}\n\`\`\`\"}" "${slackWebHook}" fi - echo "All Done!" + echo "All Done!" \ No newline at end of file From cca7f06a5bde53a25d93228c330c964a09a292df Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 20 Feb 2023 17:34:53 -0500 Subject: [PATCH 077/362] Feat/argo workflow metrics (#2151) * Added annotations to get argo controller to send metrics to Datadog * Added annotations and settings needed to get the Argo controller to export metrics to Datadog * Added options to make the Argo workflow controller export metrics to Datadog. * Added changes to deploy Argo and Datadog so that Argo exports Prometheus metrics that Datadog can scrape. For now, we're going to leave this as a separate branch while we figure out the cost-benefit on exporting these metrics, and if it makes sense to do this by default. * Added annotations to allow Datadog to import Prometheus metrics emitted by the Argo workflow controller. * Added a version pin to kube-setup-argo so running it again will not unexpectedly upgrade an Argo workflows deployment --------- Co-authored-by: J. 
Q <55899496+jawadqur@users.noreply.github.com> --- gen3/bin/kube-setup-argo.sh | 2 +- kube/services/argo/values.yaml | 22 +++++++++++++++++++++- kube/services/datadog/values.yaml | 4 ++-- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index 881638808..beba520aa 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -243,7 +243,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} + helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.22.11 else gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy" fi diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 8ada3dd17..329b058d7 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -2,7 +2,27 @@ controller: parallelism: 10 metricsConfig: # -- Enables prometheus metrics server - enabled: false + enabled: true + servicePort: 9090 + + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "9090" + + ad.datadoghq.com/controller.checks: | + { + "openmetrics": { + "init_config": {}, + "instances": [ + { + "openmetrics_endpoint": "http://%%host%%:%%port%%/metrics ", + "namespace": "argo", + "metrics": ["*"] + } + ] + } + } # -- enable persistence using postgres persistence: diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 95ec57239..0ea0fd573 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -193,9 +193,9 @@ datadog: ## ref: https://docs.datadoghq.com/agent/kubernetes/prometheus/ prometheusScrape: # 
datadog.prometheusScrape.enabled -- Enable autodiscovering pods and services exposing prometheus metrics. - enabled: false + enabled: true # datadog.prometheusScrape.serviceEndpoints -- Enable generating dedicated checks for service endpoints. - serviceEndpoints: false + serviceEndpoints: true # datadog.prometheusScrape.additionalConfigs -- Allows adding advanced openmetrics check configurations with custom discovery rules. (Requires Agent version 7.27+) additionalConfigs: [] # - From ef86fafb805bf8f11a8a24f6646bccd94c2674d6 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Tue, 21 Feb 2023 14:39:02 -0600 Subject: [PATCH 078/362] Update kube-setup-karpenter.sh --- gen3/bin/kube-setup-karpenter.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 925eebaea..f22d7824a 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -1,6 +1,6 @@ #!/bin/bash -set -e +#set -e source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" From 7daedb1316d496d102d34aeeadb641a848bb3d58 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Thu, 23 Feb 2023 10:13:55 -0600 Subject: [PATCH 079/362] Use jenkins-niaid for service PRs (#2162) --- files/scripts/ci-env-pool-reset.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index a142fd7c2..115774a78 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -29,11 +29,11 @@ source "${GEN3_HOME}/gen3/gen3setup.sh" cat - > jenkins-envs-services.txt < jenkins-envs-releases.txt < Date: Fri, 24 Feb 2023 07:50:48 -0800 Subject: [PATCH 080/362] change resources for portal-deploy (#2165) * change resources for portal-deploy * more resources * go back * increase resources on portal-root-deploy --- 
kube/services/portal/portal-deploy.yaml | 6 +++--- kube/services/portal/portal-root-deploy.yaml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index 9ad6e1c6e..0b53a3bad 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -87,11 +87,11 @@ spec: failureThreshold: 30 resources: requests: - cpu: 0.6 - memory: 512Mi + cpu: 1 + memory: 1Gi limits: # portal pigs out on resources at startup, then settles down - cpu: 2 + cpu: 4 memory: 4096Mi ports: - containerPort: 80 diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml index 948744576..92b531615 100644 --- a/kube/services/portal/portal-root-deploy.yaml +++ b/kube/services/portal/portal-root-deploy.yaml @@ -87,11 +87,11 @@ spec: failureThreshold: 10 resources: requests: - cpu: 0.6 - memory: 512Mi + cpu: 1 + memory: 1Gi limits: # portal pigs out on resources at startup, then settles down - cpu: 2 + cpu: 4 memory: 4096Mi ports: - containerPort: 80 From c3405c67bdb12290609e9c09c58a25b73dbdbd41 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 28 Feb 2023 09:30:15 -0700 Subject: [PATCH 081/362] removing the system-probe container for datadog due to container crash with fips (please refer to GPE-783 for more information. 
(#2166) --- kube/services/datadog/values.yaml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 0ea0fd573..75be6c281 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -13,7 +13,7 @@ datadog: #Enables Optional Universal Service Monitoring ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm serviceMonitoring: - enabled: true + enabled: false # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. ## If set, this parameter takes precedence over "apiKey". @@ -89,6 +89,13 @@ datadog: ## Enable systemProbe agent and provide custom configs systemProbe: + resources: + requests: + cpu: 100m + memory: 200Mi + limits: + cpu: 100m + memory: 200Mi # datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for system-probe agent debugPort: 0 @@ -161,7 +168,7 @@ datadog: networkMonitoring: # datadog.networkMonitoring.enabled -- Enable network performance monitoring - enabled: true + enabled: false ## Enable security agent and provide custom configs From 862a432e88995762b166e1258ae2ba0c0d8d4603 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 1 Mar 2023 08:51:27 -0700 Subject: [PATCH 082/362] reducing the disk size for Prometheus PVC (#2167) --- kube/services/monitoring/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml index e033d8801..ffdf92bd9 100644 --- a/kube/services/monitoring/values.yaml +++ b/kube/services/monitoring/values.yaml @@ -2595,7 +2595,7 @@ prometheus: accessModes: ["ReadWriteOnce"] resources: requests: - storage: 500Gi + storage: 80Gi #selector: {} ## Using tmpfs volume From 
54845a3c30583523d395b75bf017f608c013c23d Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 2 Mar 2023 06:56:59 -0800 Subject: [PATCH 083/362] Remove limits in portal kube config (#2170) * Remove limits in portal kube config * Update portal-root-deploy.yaml --- kube/services/portal/portal-deploy.yaml | 4 ---- kube/services/portal/portal-root-deploy.yaml | 4 ---- 2 files changed, 8 deletions(-) diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index 0b53a3bad..8087efbdc 100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -89,10 +89,6 @@ spec: requests: cpu: 1 memory: 1Gi - limits: - # portal pigs out on resources at startup, then settles down - cpu: 4 - memory: 4096Mi ports: - containerPort: 80 - containerPort: 443 diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml index 92b531615..4d67268bb 100644 --- a/kube/services/portal/portal-root-deploy.yaml +++ b/kube/services/portal/portal-root-deploy.yaml @@ -89,10 +89,6 @@ spec: requests: cpu: 1 memory: 1Gi - limits: - # portal pigs out on resources at startup, then settles down - cpu: 4 - memory: 4096Mi ports: - containerPort: 80 - containerPort: 443 From 66040449c83d8b5fb1a0f3035a8f85e237312704 Mon Sep 17 00:00:00 2001 From: regular bill Date: Fri, 3 Mar 2023 13:20:19 -0600 Subject: [PATCH 084/362] cis paly book 03/03/2023 --- ansible/playbooks/ciis-ubuntu.yaml | 37 ++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 ansible/playbooks/ciis-ubuntu.yaml diff --git a/ansible/playbooks/ciis-ubuntu.yaml b/ansible/playbooks/ciis-ubuntu.yaml new file mode 100644 index 000000000..804e1a3da --- /dev/null +++ b/ansible/playbooks/ciis-ubuntu.yaml @@ -0,0 +1,37 @@ +--- +- name: Apply CIS Ubuntu Benchmark + hosts: ec2-ubuntu-host + gather_facts: yes + tasks: + - name: 1.1.1 Ensure mounting of cramfs filesystems is disabled + mount: + name: cramfs + 
fstype: cramfs + state: unmounted + enabled: no + + - name: 1.1.2 Ensure mounting of freevxfs filesystems is disabled + mount: + name: freevxfs + fstype: freevxfs + state: unmounted + enabled: no + + # Add tasks for other recommendations in the benchmark + + - name: 5.1.8 Ensure at/cron is restricted to authorized users + file: + path: /etc/cron.deny + state: absent + become: yes + become_method: sudo + + - name: 5.2.1 Ensure permissions on /etc/crontab are configured + file: + path: /etc/crontab + mode: '0600' + owner: root + group: root + become: yes + become_method: sudo + From 25c7c0efd12d7b87f8318efcaf74c2e8fe7da756 Mon Sep 17 00:00:00 2001 From: regular bill Date: Fri, 3 Mar 2023 13:26:41 -0600 Subject: [PATCH 085/362] cis paly book 03/03/2023 --- ansible/playbooks/{ciis-ubuntu.yaml => cis-ubuntu.yaml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename ansible/playbooks/{ciis-ubuntu.yaml => cis-ubuntu.yaml} (95%) diff --git a/ansible/playbooks/ciis-ubuntu.yaml b/ansible/playbooks/cis-ubuntu.yaml similarity index 95% rename from ansible/playbooks/ciis-ubuntu.yaml rename to ansible/playbooks/cis-ubuntu.yaml index 804e1a3da..ffc7b9c35 100644 --- a/ansible/playbooks/ciis-ubuntu.yaml +++ b/ansible/playbooks/cis-ubuntu.yaml @@ -1,5 +1,5 @@ --- -- name: Apply CIS Ubuntu Benchmark +- name: Apply CIS Linux 2 Benchmark hosts: ec2-ubuntu-host gather_facts: yes tasks: From 0a780498768a8c89a47ad025a6095c1dee0372e8 Mon Sep 17 00:00:00 2001 From: regular bill Date: Fri, 3 Mar 2023 13:30:04 -0600 Subject: [PATCH 086/362] cis paly book 03/03/2023:01 --- ansible/playbooks/cis-ubuntu.yaml | 37 ------------------------------- 1 file changed, 37 deletions(-) delete mode 100644 ansible/playbooks/cis-ubuntu.yaml diff --git a/ansible/playbooks/cis-ubuntu.yaml b/ansible/playbooks/cis-ubuntu.yaml deleted file mode 100644 index ffc7b9c35..000000000 --- a/ansible/playbooks/cis-ubuntu.yaml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- name: Apply CIS Linux 2 Benchmark - hosts: 
ec2-ubuntu-host - gather_facts: yes - tasks: - - name: 1.1.1 Ensure mounting of cramfs filesystems is disabled - mount: - name: cramfs - fstype: cramfs - state: unmounted - enabled: no - - - name: 1.1.2 Ensure mounting of freevxfs filesystems is disabled - mount: - name: freevxfs - fstype: freevxfs - state: unmounted - enabled: no - - # Add tasks for other recommendations in the benchmark - - - name: 5.1.8 Ensure at/cron is restricted to authorized users - file: - path: /etc/cron.deny - state: absent - become: yes - become_method: sudo - - - name: 5.2.1 Ensure permissions on /etc/crontab are configured - file: - path: /etc/crontab - mode: '0600' - owner: root - group: root - become: yes - become_method: sudo - From 8d086871f33670244b7db2ad20f04715a83c58d0 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 3 Mar 2023 15:08:32 -0600 Subject: [PATCH 087/362] chore(datadog-exclusions): Excluded some namespaces/containers from being ingested (#2172) Co-authored-by: Edward Malinowski --- kube/services/datadog/values.yaml | 1 + kube/services/jobs/usersync-job.yaml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 75be6c281..6df70d0ec 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -218,6 +218,7 @@ datadog: # - send_distribution_buckets: true # timeout: 5 + containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:usersync" ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml index 058f49bf6..384f68b0d 100644 --- a/kube/services/jobs/usersync-job.yaml +++ b/kube/services/jobs/usersync-job.yaml @@ -92,7 +92,7 @@ spec: configMap: name: "projects" containers: - - name: fence + - name: usersync GEN3_FENCE_IMAGE imagePullPolicy: Always env: 
From fae613eafede229f0166a194e4bbb67824846091 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 3 Mar 2023 15:08:55 -0600 Subject: [PATCH 088/362] chore(karpenter-fluentd): Force karpenter deployments to deploy new fluentd configuration and hardcode image to prevent old cdis-manifest repos from breaking fluentd version (#2171) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-karpenter.sh | 3 ++- kube/services/fluentd/fluentd-karpenter.yaml | 3 ++- kube/services/fluentd/fluentd.yaml | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index f22d7824a..5a4faa7de 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -104,7 +104,8 @@ gen3_deploy_karpenter() { gen3_log_info "Remove cluster-autoscaler" gen3 kube-setup-autoscaler --remove - + # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues + gen3 kube-setup-fluentd gen3_log_info "Adding node templates for karpenter" g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - diff --git a/kube/services/fluentd/fluentd-karpenter.yaml b/kube/services/fluentd/fluentd-karpenter.yaml index 8949a734f..807ef1198 100644 --- a/kube/services/fluentd/fluentd-karpenter.yaml +++ b/kube/services/fluentd/fluentd-karpenter.yaml @@ -43,7 +43,8 @@ spec: effect: "NoSchedule" containers: - name: fluentd - GEN3_FLUENTD_IMAGE + # Hardcode fluentd version to ensure we don't run into containerd logging issues + image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0 env: # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes - name: K8S_NODE_NAME diff --git a/kube/services/fluentd/fluentd.yaml 
b/kube/services/fluentd/fluentd.yaml index dc2bbf05b..112a0cab2 100644 --- a/kube/services/fluentd/fluentd.yaml +++ b/kube/services/fluentd/fluentd.yaml @@ -43,7 +43,8 @@ spec: effect: "NoSchedule" containers: - name: fluentd - GEN3_FLUENTD_IMAGE + # Hardcode fluentd version to match karpenter daemonset + image: fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0 env: # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes - name: K8S_NODE_NAME From 42cd914a9c405360001458d0282efffc4c7cffe3 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Mon, 6 Mar 2023 07:06:33 -0600 Subject: [PATCH 089/362] remove google-cloud-sdk (#2174) --- Docker/awshelper/Dockerfile | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index d85d23082..231870670 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -56,15 +56,9 @@ RUN export CLOUD_SDK_REPO="cloud-sdk" && \ curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ curl -sL https://deb.nodesource.com/setup_14.x | bash - && \ apt-get update && \ - apt-get install -y google-cloud-sdk \ - google-cloud-sdk-cbt \ - kubectl && \ + apt-get install -y kubectl && \ apt-get install -y --no-install-recommends nodejs && \ - rm -rf /var/lib/apt/lists/* \ - gcloud config set core/disable_usage_reporting true && \ - gcloud config set component_manager/disable_update_check true && \ - gcloud config set metrics/environment github_docker_image && \ - gcloud --version && \ + rm -rf /var/lib/apt/lists/* && \ kubectl version --client && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* /var/log/* From d6edf267a2d6291c4a1868c0d7ec847a8d4a5a60 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 7 Mar 2023 16:55:59 -0600 Subject: [PATCH 090/362] Whitelist biorender.com => marketing.biorender.com 
(#2177) --- files/squid_whitelist/web_whitelist | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 3f009534f..fe0fcac8d 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -8,7 +8,7 @@ apache.github.io api.epigraphdb.org api.monqcle.com biodata-integration-tests.net -biorender.com +marketing.biorender.com clinicaltrials.gov ctds-planx.atlassian.net data.cityofchicago.org From c991d34d84a0c91607601f3a253547de27545e4d Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Wed, 8 Mar 2023 16:40:28 -0600 Subject: [PATCH 091/362] Fix/cedar typo (#2161) * fix typo * fix typo * fix typo --- files/scripts/healdata/heal-cedar-data-ingest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 586b43249..f571f33e4 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -25,7 +25,8 @@ "Trans Female": "Male-to-female transsexual", "Agender, Non-binary, gender non-conforming": "Other", "Gender Queer": "Other", - "Intersex": "Intersexed" + "Intersex": "Intersexed", + "Buisness Development": "Business Development" } # Defines field that we don't want to include in the filters From e3dc6ee7c211f50ef201e23f277557202fd467f3 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 13 Mar 2023 15:26:44 -0400 Subject: [PATCH 092/362] Feat/argo workflow metrics (#2168) * Added annotations to get argo controller to send metrics to Datadog * Added annotations and settings needed to get the Argo controller to export metrics to Datadog * Added options to make the Argo workflow controller export metrics to Datadog. 
* Added changes to deploy Argo and Datadog so that Argo exports Prometheus metrics that Datadog can scrape. For now, we're going to leave this as a separate branch while we figure out the cost-benefit on exporting these metrics, and if it makes sense to do this by default. * Added annotations to allow Datadog to import Prometheus metrics emitted by the Argo workflow controller. * Added a version pin to kube-setup-argo so running it again will not unexpectedly upgrade an Argo workflows deployment * Added the ability for kube-setup-argo to read a desired argo version from the Argo manifest, and fixed a bug in g3k_config_lookup * Update kube-setup-argo.sh * Update kube-setup-argo.sh * Update kube-setup-argo.sh * Update kube-setup-argo.sh * Update kube-setup-argo.sh * Update kube-setup-argo.sh --- gen3/bin/kube-setup-argo.sh | 21 ++++++++++++++++----- gen3/lib/g3k_manifest.sh | 16 ++++++++++++++-- 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index beba520aa..c554b9892 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -16,7 +16,6 @@ function setup_argo_buckets { local policyFile="$XDG_RUNTIME_DIR/policy_$$.json" local bucketLifecyclePolicyFile="$XDG_RUNTIME_DIR/bucket_lifecycle_policy_$$.json" - if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then gen3_log_err "could not determine account numer" return 1 @@ -29,6 +28,7 @@ function setup_argo_buckets { # try to come up with a unique but composable bucket name bucketName="gen3-argo-${accountNumber}-${environment//_/-}" userName="gen3-argo-${environment//_/-}-user" + if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then if [[ ! 
-z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" @@ -38,7 +38,8 @@ function setup_argo_buckets { bucketName=$(g3k_config_lookup '.argo."s3-bucket"') fi fi - if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"') ]]; then + + if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"')]]; then if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" internalBucketName=$(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) @@ -46,6 +47,7 @@ function setup_argo_buckets { gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" internalBucketName=$(g3k_config_lookup '.argo."internal-s3-bucket"') fi + gen3_log_info "Using internal S3 bucket found in manifest: ${internalBucketName}" local internalBucketPolicyFile="$XDG_RUNTIME_DIR/internal_bucket_policy_$$.json" cat > "$internalBucketPolicyFile" < ${valuesFile} + # Attempt to retrieve the desired Argo version from the manifest. 
If it's not found, we'll instead fall back to + # version 0.22.11 as a default + + if g3k_config_lookup '.version' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json &>/dev/null; then + argo_version=$(g3k_config_lookup '.version' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + fi + argo_version=${argo_version:-0.22.11} + helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.22.11 + helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version $argo_version + else gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy" fi else gen3_log_info "kube-setup-argo exiting - only deploys from default namespace" -fi \ No newline at end of file +fi diff --git a/gen3/lib/g3k_manifest.sh b/gen3/lib/g3k_manifest.sh index ae42e84ba..a1dfff0c5 100644 --- a/gen3/lib/g3k_manifest.sh +++ b/gen3/lib/g3k_manifest.sh @@ -304,9 +304,21 @@ g3k_config_lookup() { configPath="$1" fi if [[ "$configPath" =~ .json$ ]]; then - jq -r -e "$queryStr" < "$configPath" + output=$(jq -r -e "$queryStr" < "$configPath") + + if [[ "$output" == "null" ]]; then + echo "" + else + echo "$output" + fi elif [[ "$configPath" =~ .yaml ]]; then - yq -r -e "$queryStr" < "$configPath" + output=$(yq -r -e "$queryStr" < "$configPath") + + if [[ "$output" == "null" ]]; then + echo "" + else + echo "$output" + fi else gen3_log_err "file is not .json or .yaml: $configPath" return 1 From e9aab705ded682f034335b8e93438bc715275137 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Tue, 14 Mar 2023 08:52:44 -0500 Subject: [PATCH 093/362] Revert "Feat/argo workflow metrics (#2168)" (#2184) This reverts commit e3dc6ee7c211f50ef201e23f277557202fd467f3. 
--- gen3/bin/kube-setup-argo.sh | 21 +++++---------------- gen3/lib/g3k_manifest.sh | 16 ++-------------- 2 files changed, 7 insertions(+), 30 deletions(-) diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index c554b9892..beba520aa 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -16,6 +16,7 @@ function setup_argo_buckets { local policyFile="$XDG_RUNTIME_DIR/policy_$$.json" local bucketLifecyclePolicyFile="$XDG_RUNTIME_DIR/bucket_lifecycle_policy_$$.json" + if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then gen3_log_err "could not determine account numer" return 1 @@ -28,7 +29,6 @@ function setup_argo_buckets { # try to come up with a unique but composable bucket name bucketName="gen3-argo-${accountNumber}-${environment//_/-}" userName="gen3-argo-${environment//_/-}-user" - if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" @@ -38,8 +38,7 @@ function setup_argo_buckets { bucketName=$(g3k_config_lookup '.argo."s3-bucket"') fi fi - - if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"')]]; then + if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."internal-s3-bucket"') ]]; then if [[ ! 
-z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" internalBucketName=$(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) @@ -47,7 +46,6 @@ function setup_argo_buckets { gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" internalBucketName=$(g3k_config_lookup '.argo."internal-s3-bucket"') fi - gen3_log_info "Using internal S3 bucket found in manifest: ${internalBucketName}" local internalBucketPolicyFile="$XDG_RUNTIME_DIR/internal_bucket_policy_$$.json" cat > "$internalBucketPolicyFile" < ${valuesFile} - # Attempt to retrieve the desired Argo version from the manifest. If it's not found, we'll instead fall back to - # version 0.22.11 as a default - - if g3k_config_lookup '.version' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json &>/dev/null; then - argo_version=$(g3k_config_lookup '.version' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) - fi - argo_version=${argo_version:-0.22.11} - helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version $argo_version - + helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.22.11 else gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy" fi else gen3_log_info "kube-setup-argo exiting - only deploys from default namespace" -fi +fi \ No newline at end of file diff --git a/gen3/lib/g3k_manifest.sh b/gen3/lib/g3k_manifest.sh index a1dfff0c5..ae42e84ba 100644 --- a/gen3/lib/g3k_manifest.sh +++ b/gen3/lib/g3k_manifest.sh @@ -304,21 +304,9 @@ g3k_config_lookup() { configPath="$1" fi if [[ "$configPath" =~ .json$ ]]; then - output=$(jq -r -e "$queryStr" < 
"$configPath") - - if [[ "$output" == "null" ]]; then - echo "" - else - echo "$output" - fi + jq -r -e "$queryStr" < "$configPath" elif [[ "$configPath" =~ .yaml ]]; then - output=$(yq -r -e "$queryStr" < "$configPath") - - if [[ "$output" == "null" ]]; then - echo "" - else - echo "$output" - fi + yq -r -e "$queryStr" < "$configPath" else gen3_log_err "file is not .json or .yaml: $configPath" return 1 From 4fbbb51f2dbe3f94d7a62f4abc44dd0eb3349d71 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 17 Mar 2023 10:21:38 -0700 Subject: [PATCH 094/362] Revert "Use jenkins-niaid for service PRs (#2162)" (#2176) This reverts commit 7daedb1316d496d102d34aeeadb641a848bb3d58. --- files/scripts/ci-env-pool-reset.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index 115774a78..a142fd7c2 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -29,11 +29,11 @@ source "${GEN3_HOME}/gen3/gen3setup.sh" cat - > jenkins-envs-services.txt < jenkins-envs-releases.txt < Date: Fri, 17 Mar 2023 15:21:41 -0600 Subject: [PATCH 095/362] =?UTF-8?q?adding=20changes=20necessary=20for=20cl?= =?UTF-8?q?uster=20version=201.24=20for=20Karpenter=20and=20F=E2=80=A6=20(?= =?UTF-8?q?#2175)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * adding changes necessary for cluster version 1.24 for Karpenter and Fluentd * added the "--force" flag to ensure existing deployments are updated for fluentd * correcting the name so it will override the existing fluentd daemonset if it is deployed --- gen3/bin/kube-setup-fluentd.sh | 9 ++- gen3/bin/kube-setup-karpenter.sh | 9 ++- kube/services/fluentd/fluentd-eks-1.24.yaml | 86 +++++++++++++++++++++ 3 files changed, 100 insertions(+), 4 deletions(-) create mode 100644 kube/services/fluentd/fluentd-eks-1.24.yaml diff --git a/gen3/bin/kube-setup-fluentd.sh b/gen3/bin/kube-setup-fluentd.sh index 
c1d15bb80..28a7011a8 100644 --- a/gen3/bin/kube-setup-fluentd.sh +++ b/gen3/bin/kube-setup-fluentd.sh @@ -45,8 +45,13 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then if g3kubectl --namespace=logging get daemonset fluentd > /dev/null 2>&1; then g3kubectl "--namespace=logging" delete daemonset fluentd fi - (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f - - (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-karpenter.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f - + export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` + if [ "${clusterversion}" = "24+" ]; then + (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-eks-1.24.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f - + else + (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f - + (unset KUBECTL_NAMESPACE; gen3 gitops filter "${GEN3_HOME}/kube/services/fluentd/fluentd-karpenter.yaml" GEN3_LOG_GROUP_NAME "${vpc_name}") | g3kubectl "--namespace=logging" apply -f - + fi # We need this serviceaccount to be in the default namespace for the job and cronjob to properly work g3kubectl apply -f "${GEN3_HOME}/kube/services/fluentd/fluent-jobs-serviceaccount.yaml" -n default if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ]; diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 5a4faa7de..2f9d0a2c8 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -16,7 +16,12 @@ gen3_deploy_karpenter() { # Ensure the spot instance service linked role is setup # It is required for running spot instances aws iam 
create-service-linked-role --aws-service-name spot.amazonaws.com || true - karpenter=${karpenter:-v0.22.0} + export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` + if [ "${clusterversion}" = "24+" ]; then + karpenter=${karpenter:-v0.24.0} + else + karpenter=${karpenter:-v0.22.0} + fi echo '{ "Statement": [ { @@ -105,7 +110,7 @@ gen3_deploy_karpenter() { gen3_log_info "Remove cluster-autoscaler" gen3 kube-setup-autoscaler --remove # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues - gen3 kube-setup-fluentd + gen3 kube-setup-fluentd --force gen3_log_info "Adding node templates for karpenter" g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - diff --git a/kube/services/fluentd/fluentd-eks-1.24.yaml b/kube/services/fluentd/fluentd-eks-1.24.yaml new file mode 100644 index 000000000..1fb748840 --- /dev/null +++ b/kube/services/fluentd/fluentd-eks-1.24.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd + namespace: logging + labels: + k8s-app: fluentd-eks-1.24-logging + version: v1 + GEN3_DATE_LABEL + kubernetes.io/cluster-service: "true" +spec: + selector: + matchLabels: + k8s-app: fluentd-eks-1.24-logging + version: v1 + template: + metadata: + labels: + k8s-app: fluentd-eks-1.24-logging + version: v1 + kubernetes.io/cluster-service: "true" + spec: + priorityClassName: system-cluster-critical + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: "role" + operator: "Equal" + value: "jupyter" + effect: "NoSchedule" + - key: "role" + operator: "Equal" + value: "workflow" + effect: "NoSchedule" + containers: + - name: fluentd + # Hardcode fluentd version to ensure we don't run into containerd logging issues + image: 
fluent/fluentd-kubernetes-daemonset:v1.15.3-debian-cloudwatch-1.0 + env: + # See https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes + - name: K8S_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Deploy with kube-setup-fluentd.sh ... + - name: LOG_GROUP_NAME + GEN3_LOG_GROUP_NAME + - name: AWS_REGION + value: "us-east-1" + - name: FLUENTD_CONF + value: "gen3.conf" + - name: FLUENT_CONTAINER_TAIL_PARSER_TYPE + value: "cri" + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 1Gi + volumeMounts: + - name: fluentd-gen3 + mountPath: /fluentd/etc/gen3.conf + subPath: gen3.conf + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + command: ["/bin/bash" ] + args: + - "-c" + # Script always succeeds if it runs (echo exits with 0) + - | + /fluentd/entrypoint.sh + terminationGracePeriodSeconds: 30 + serviceAccountName: fluentd + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: fluentd-gen3 + configMap: + name: fluentd-gen3 From a43fa0ad4a4bb6653fed3221fdb6aeb8af74f618 Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Mon, 20 Mar 2023 09:15:24 -0700 Subject: [PATCH 096/362] (HP-1062): add slack status messages to mds-agg-sync (#2182) Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> --- .../jobs/metadata-aggregate-sync-job.yaml | 47 ++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index d62ab0c77..8ef33532f 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -37,7 +37,9 @@ spec: configMap: name: manifest-metadata optional: true - 
containers: + - name: shared-data + emptyDir: {} + initContainers: - name: metadata GEN3_METADATA_IMAGE volumeMounts: @@ -53,6 +55,8 @@ spec: readOnly: true mountPath: /metadata.json subPath: json + - name: shared-data + mountPath: /mnt/shared env: - name: GEN3_DEBUG GEN3_DEBUG_FLAG|-value: "False"-| @@ -76,4 +80,45 @@ spec: - "-c" - | /env/bin/python /src/src/mds/populate.py --config /aggregate_config.json + if [ $? -ne 0 ]; then + echo "WARNING: non zero exit code: $?" + else + touch /mnt/shared/success + fi + containers: + - name: awshelper + env: + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + - name: gen3Env + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname + GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + - | + if [[ ! "$slackWebHook" =~ ^http ]]; then + echo "Slack webhook not set" + exit 0 + fi + if ! [ -f /mnt/shared/success ]; then + success="FAILED" + color="ff0000" + else + success="SUCCESS" + color="2EB67D" + fi + echo "Sending ${success} message to slack..." + payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: metadata-aggregate-sync cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}" + echo "Payload=${payload}" + curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}" restartPolicy: Never From 575dd8aecad224c30237bf90f60c17a8591f6178 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 20 Mar 2023 13:28:20 -0500 Subject: [PATCH 097/362] Reduce pod sizes based on metrics (#2081) * Reduce pod sizes based on Datadog metrics --- gen3/lib/testData/default/expectedFenceResult.yaml | 11 +++++------ gen3/lib/testData/default/expectedSheepdogResult.yaml | 7 +++---- .../test1.manifest.g3k/expectedFenceResult.yaml | 7 +++---- .../test1.manifest.g3k/expectedSheepdogResult.yaml | 7 +++---- .../access-backend/access-backend-deploy.yaml | 5 ++--- kube/services/acronymbot/acronymbot-deploy.yaml | 6 +++--- .../ambassador-gen3/ambassador-gen3-deploy.yaml | 3 +-- kube/services/ambassador/ambassador-deploy.yaml | 2 +- kube/services/arborist/arborist-deploy-2.yaml | 7 +++---- kube/services/audit-service/audit-service-deploy.yaml | 7 +++---- kube/services/auspice/auspice-deploy.yaml | 7 +++---- .../autoscaler/cluster-autoscaler-autodiscover.yaml | 1 - kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml | 2 +- kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml | 5 ++--- .../cohort-middleware/cohort-middleware-deploy.yaml | 5 ++--- kube/services/dashboard/dashboard-deploy.yaml | 5 ++--- kube/services/fence/fence-deploy.yaml | 7 +++---- kube/services/fenceshib/fenceshib-deploy.yaml | 7 +++---- .../frontend-framework/frontend-framework-deploy.yaml | 3 +-- kube/services/guppy/guppy-deploy.yaml | 7 +++---- kube/services/indexd/indexd-deploy.yaml | 7 +++---- .../manifestservice/manifestservice-deploy.yaml | 5 ++--- kube/services/metadata/metadata-deploy.yaml | 7 +++---- kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml | 1 - kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 8 +++----- kube/services/peregrine/peregrine-deploy.yaml | 3 +-- kube/services/pidgin/pidgin-deploy.yaml | 4 +++- .../presigned-url-fence-deploy.yaml | 7 +++---- kube/services/requestor/requestor-deploy.yaml | 7 +++---- kube/services/revproxy/revproxy-deploy.yaml | 7 +++---- kube/services/sheepdog/sheepdog-deploy.yaml | 7 +++---- 
kube/services/sower/sower-deploy.yaml | 7 +++---- kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml | 7 +++---- kube/services/wts/wts-deploy.yaml | 7 +++---- 34 files changed, 83 insertions(+), 112 deletions(-) diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml index 62dc751d4..ddc21f0a5 100644 --- a/gen3/lib/testData/default/expectedFenceResult.yaml +++ b/gen3/lib/testData/default/expectedFenceResult.yaml @@ -200,12 +200,11 @@ spec: mountPath: "/fence/jwt-keys.tar" subPath: "jwt-keys.tar" resources: - requests: - cpu: 0.4 - memory: 1200Mi - limits: - cpu: 1.0 - memory: 2400Mi + requests: + cpu: 100m + memory: 500Mi + limits: + memory: 1024Mi command: ["/bin/bash"] args: - "-c" diff --git a/gen3/lib/testData/default/expectedSheepdogResult.yaml b/gen3/lib/testData/default/expectedSheepdogResult.yaml index f40a698f6..b9db85a36 100644 --- a/gen3/lib/testData/default/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/default/expectedSheepdogResult.yaml @@ -157,8 +157,7 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 0.8 - memory: 1024Mi + cpu: 100m + memory: 200Mi limits: - cpu: 2 - memory: 2048Mi \ No newline at end of file + memory: 800Mi \ No newline at end of file diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index d4196c070..90a329c84 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -233,11 +233,10 @@ spec: subPath: "jwt-keys.tar" resources: requests: - cpu: 0.4 - memory: 1200Mi + cpu: 100m + memory: 500Mi limits: - cpu: 1.0 - memory: 2400Mi + memory: 1024Mi command: ["/bin/bash"] args: - "-c" diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml index 5ebdc1bb1..f54fd3e03 100644 --- 
a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml @@ -191,8 +191,7 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 0.8 - memory: 1024Mi + cpu: 100m + memory: 200Mi limits: - cpu: 2 - memory: 2048Mi + memory: 800Mi diff --git a/kube/services/access-backend/access-backend-deploy.yaml b/kube/services/access-backend/access-backend-deploy.yaml index 9f46176d5..61e88a1c7 100644 --- a/kube/services/access-backend/access-backend-deploy.yaml +++ b/kube/services/access-backend/access-backend-deploy.yaml @@ -80,8 +80,7 @@ spec: subPath: "user.yaml" resources: requests: - cpu: 0.4 - memory: 512Mi + cpu: 100m + memory: 128Mi limits: - cpu: 1 memory: 2048Mi diff --git a/kube/services/acronymbot/acronymbot-deploy.yaml b/kube/services/acronymbot/acronymbot-deploy.yaml index ce1a53dd2..065f55da4 100644 --- a/kube/services/acronymbot/acronymbot-deploy.yaml +++ b/kube/services/acronymbot/acronymbot-deploy.yaml @@ -36,10 +36,10 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 1 + cpu: 100m limits: - cpu: 2 - memory: 512Mi + cpu: 500m + memory: 128Mi volumeMounts: - name: slacktoken mountPath: "/secret/slacktoken.json" diff --git a/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml b/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml index c7a49dfd7..20fdbb484 100644 --- a/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml +++ b/kube/services/ambassador-gen3/ambassador-gen3-deploy.yaml @@ -50,10 +50,9 @@ spec: GEN3_AMBASSADOR_IMAGE resources: limits: - cpu: 1 memory: 400Mi requests: - cpu: 200m + cpu: 100m memory: 100Mi env: - name: AMBASSADOR_NAMESPACE diff --git a/kube/services/ambassador/ambassador-deploy.yaml b/kube/services/ambassador/ambassador-deploy.yaml index 073f032ac..8788cef13 100644 --- a/kube/services/ambassador/ambassador-deploy.yaml +++ b/kube/services/ambassador/ambassador-deploy.yaml @@ -59,7 +59,7 @@ spec: cpu: 1 memory: 400Mi 
requests: - cpu: 200m + cpu: 100m memory: 100Mi env: - name: AMBASSADOR_NAMESPACE diff --git a/kube/services/arborist/arborist-deploy-2.yaml b/kube/services/arborist/arborist-deploy-2.yaml index fbd017caa..f88ad6df5 100644 --- a/kube/services/arborist/arborist-deploy-2.yaml +++ b/kube/services/arborist/arborist-deploy-2.yaml @@ -87,11 +87,10 @@ spec: subPath: dbcreds.json resources: requests: - cpu: 0.4 - memory: 1200Mi + cpu: 100m + memory: 200Mi limits: - cpu: 0.8 - memory: 2400Mi + memory: 1000Mi command: ["sh"] args: - "-c" diff --git a/kube/services/audit-service/audit-service-deploy.yaml b/kube/services/audit-service/audit-service-deploy.yaml index be31e7a4c..935cab408 100644 --- a/kube/services/audit-service/audit-service-deploy.yaml +++ b/kube/services/audit-service/audit-service-deploy.yaml @@ -95,11 +95,10 @@ spec: subPath: "audit-service-config.yaml" resources: requests: - cpu: 0.4 - memory: 512Mi + cpu: 100m + memory: 100Mi limits: - cpu: 0.8 - memory: 1024Mi + memory: 512Mi initContainers: - name: audit-db-migrate GEN3_AUDIT-SERVICE_IMAGE diff --git a/kube/services/auspice/auspice-deploy.yaml b/kube/services/auspice/auspice-deploy.yaml index 63eeba922..ce228be9f 100644 --- a/kube/services/auspice/auspice-deploy.yaml +++ b/kube/services/auspice/auspice-deploy.yaml @@ -80,8 +80,7 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 0.5 - memory: 1024Mi + cpu: 100m + memory: 128Mi limits: - cpu: 1 - memory: 2400Mi + memory: 1024Mi diff --git a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml index 2e1b94fcd..e99e3fd15 100644 --- a/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml +++ b/kube/services/autoscaler/cluster-autoscaler-autodiscover.yaml @@ -153,7 +153,6 @@ spec: name: cluster-autoscaler resources: limits: - cpu: 1000m memory: 1600Mi requests: cpu: 100m diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml 
b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml index 59690b375..ad74fc25b 100644 --- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml +++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml @@ -84,5 +84,5 @@ spec: cpu: 250m memory: 256Mi limits: - cpu: 1 + cpu: 1000m memory: 2Gi diff --git a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml index e7f89f8b8..fa6b741a2 100644 --- a/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml +++ b/kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml @@ -80,10 +80,9 @@ spec: failureThreshold: 6 resources: requests: - cpu: 0.6 - memory: 512Mi + cpu: 100m + memory: 64Mi limits: - cpu: 2 memory: 4096Mi ports: - containerPort: 8000 diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index cb2634424..db906af35 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -117,8 +117,7 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 500m - memory: 4Gi + cpu: 100m + memory: 128mi limits: - cpu: 500m memory: 4Gi diff --git a/kube/services/dashboard/dashboard-deploy.yaml b/kube/services/dashboard/dashboard-deploy.yaml index ebbbdfa11..451d99552 100644 --- a/kube/services/dashboard/dashboard-deploy.yaml +++ b/kube/services/dashboard/dashboard-deploy.yaml @@ -78,10 +78,9 @@ spec: mountPath: "/etc/gen3" resources: requests: - cpu: 0.3 - memory: 200Mi + cpu: 100m + memory: 20Mi limits: - cpu: 0.5 memory: 500Mi imagePullPolicy: Always livenessProbe: diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 1722676e0..2ceb68ee7 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -240,11 +240,10 @@ spec: subPath: "jwt-keys.tar" resources: requests: - cpu: 0.4 - memory: 1200Mi + cpu: 100m + memory: 500Mi 
limits: - cpu: 1.0 - memory: 2400Mi + memory: 1024Mi command: ["/bin/bash"] args: - "-c" diff --git a/kube/services/fenceshib/fenceshib-deploy.yaml b/kube/services/fenceshib/fenceshib-deploy.yaml index 0b74bd767..ed5d67535 100644 --- a/kube/services/fenceshib/fenceshib-deploy.yaml +++ b/kube/services/fenceshib/fenceshib-deploy.yaml @@ -226,11 +226,10 @@ spec: subPath: "incommon-login.bionimbus.org.crt" resources: requests: - cpu: 0.8 - memory: 2400Mi + cpu: 100m + memory: 500Mi limits: - cpu: 2.0 - memory: 6400Mi + memory: 2400Mi command: ["/bin/bash"] args: - "-c" diff --git a/kube/services/frontend-framework/frontend-framework-deploy.yaml b/kube/services/frontend-framework/frontend-framework-deploy.yaml index 3a36bfe7a..f0da277dc 100644 --- a/kube/services/frontend-framework/frontend-framework-deploy.yaml +++ b/kube/services/frontend-framework/frontend-framework-deploy.yaml @@ -84,10 +84,9 @@ spec: failureThreshold: 6 resources: requests: - cpu: 0.6 + cpu: 100m memory: 512Mi limits: - cpu: 2 memory: 4096Mi ports: - containerPort: 3000 diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 55cd17e41..01a8905de 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -154,8 +154,7 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 0.5 - memory: 1024Mi + cpu: 100m + memory: 128Mi limits: - cpu: 1 - memory: 2400Mi + memory: 1200Mi diff --git a/kube/services/indexd/indexd-deploy.yaml b/kube/services/indexd/indexd-deploy.yaml index 5ef123b19..239079058 100644 --- a/kube/services/indexd/indexd-deploy.yaml +++ b/kube/services/indexd/indexd-deploy.yaml @@ -168,8 +168,7 @@ spec: subPath: "ca.pem" resources: requests: - cpu: 0.5 - memory: 1024Mi + cpu: 100m + memory: 512Mi limits: - cpu: 1.0 - memory: 2048Mi + memory: 1024Mi diff --git a/kube/services/manifestservice/manifestservice-deploy.yaml b/kube/services/manifestservice/manifestservice-deploy.yaml index 8cb285f28..1638d669a 
100644 --- a/kube/services/manifestservice/manifestservice-deploy.yaml +++ b/kube/services/manifestservice/manifestservice-deploy.yaml @@ -99,10 +99,9 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 0.5 - memory: 512Mi + cpu: 100m + memory: 300Mi limits: - cpu: 1 memory: 1024Mi livenessProbe: httpGet: diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml index c520577a2..9bb6ac9c5 100644 --- a/kube/services/metadata/metadata-deploy.yaml +++ b/kube/services/metadata/metadata-deploy.yaml @@ -120,11 +120,10 @@ spec: subPath: json resources: requests: - cpu: 0.4 - memory: 512Mi + cpu: 100m + memory: 128Mi limits: - cpu: 1 - memory: 2048Mi + memory: 512Mi initContainers: - name: metadata-db-migrate GEN3_METADATA_IMAGE diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml index 1eef4f92d..bf128920e 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml @@ -80,5 +80,4 @@ spec: cpu: 100m memory: 100Mi limits: - cpu: 500m memory: 500Mi diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 175761c78..65d6ed38c 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -78,10 +78,9 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 250m - memory: 500Mi + cpu: 100m + memory: 1500Mi limits: - cpu: 500m memory: 4Gi - name: ohdsi-webapi-reverse-proxy image: nginx:1.23 @@ -98,5 +97,4 @@ spec: cpu: 100m memory: 100Mi limits: - cpu: 500m - memory: 500Mi + memory: 500Mi \ No newline at end of file diff --git a/kube/services/peregrine/peregrine-deploy.yaml b/kube/services/peregrine/peregrine-deploy.yaml index 3bef06d87..20bba64ad 100644 --- a/kube/services/peregrine/peregrine-deploy.yaml +++ b/kube/services/peregrine/peregrine-deploy.yaml @@ -183,10 +183,9 
@@ spec: imagePullPolicy: Always resources: requests: - cpu: 1 + cpu: 100m memory: 1024Mi limits: - cpu: 2 memory: 2048Mi livenessProbe: httpGet: diff --git a/kube/services/pidgin/pidgin-deploy.yaml b/kube/services/pidgin/pidgin-deploy.yaml index eef4856c1..8448f66f9 100644 --- a/kube/services/pidgin/pidgin-deploy.yaml +++ b/kube/services/pidgin/pidgin-deploy.yaml @@ -123,6 +123,8 @@ spec: subPath: "ca.pem" imagePullPolicy: Always resources: + requests: + cpu: 100m + memory: 50Mi limits: - cpu: 0.8 memory: 512Mi diff --git a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml index 44e951f26..45e6daaea 100644 --- a/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml +++ b/kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml @@ -235,11 +235,10 @@ spec: subPath: "jwt-keys.tar" resources: requests: - cpu: 0.4 - memory: 1200Mi + cpu: 100m + memory: 600Mi limits: - cpu: 1.0 - memory: 2400Mi + memory: 1024Mi command: ["/bin/bash"] args: - "-c" diff --git a/kube/services/requestor/requestor-deploy.yaml b/kube/services/requestor/requestor-deploy.yaml index 1485190f9..fb5ce173f 100644 --- a/kube/services/requestor/requestor-deploy.yaml +++ b/kube/services/requestor/requestor-deploy.yaml @@ -92,11 +92,10 @@ spec: subPath: "requestor-config.yaml" resources: requests: - cpu: 0.4 - memory: 512Mi + cpu: 100m + memory: 100Mi limits: - cpu: 0.8 - memory: 1024Mi + memory: 300Mi initContainers: - name: requestor-db-migrate GEN3_REQUESTOR_IMAGE diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index ad00e225c..9d5caab1b 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -196,11 +196,10 @@ spec: subPath: "ca.pem" resources: requests: - cpu: 0.5 - memory: 1024Mi + cpu: 100m + memory: 100Mi limits: - cpu: 1.0 - memory: 2048Mi + memory: 800Mi command: ["/bin/sh" ] args: 
- "-c" diff --git a/kube/services/sheepdog/sheepdog-deploy.yaml b/kube/services/sheepdog/sheepdog-deploy.yaml index f3df8cecd..a260c8741 100644 --- a/kube/services/sheepdog/sheepdog-deploy.yaml +++ b/kube/services/sheepdog/sheepdog-deploy.yaml @@ -198,8 +198,7 @@ spec: imagePullPolicy: Always resources: requests: - cpu: 0.8 - memory: 1024Mi + cpu: 100m + memory: 200Mi limits: - cpu: 2 - memory: 2048Mi + memory: 800Mi diff --git a/kube/services/sower/sower-deploy.yaml b/kube/services/sower/sower-deploy.yaml index 4d7a1c93b..b66739d06 100644 --- a/kube/services/sower/sower-deploy.yaml +++ b/kube/services/sower/sower-deploy.yaml @@ -98,9 +98,8 @@ spec: subPath: sower_config.json resources: requests: - cpu: 0.4 - memory: 1200Mi + cpu: 100m + memory: 20Mi limits: - cpu: 1.0 - memory: 2400Mi + memory: 400Mi \ No newline at end of file diff --git a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml index 3b7bb1de9..554c60cb5 100644 --- a/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml +++ b/kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml @@ -101,8 +101,7 @@ spec: - containerPort: 8000 resources: requests: - cpu: 0.4 - memory: 512Mi + cpu: 100m + memory: 20Mi limits: - cpu: 1 - memory: 2400Mi + memory: 100Mi diff --git a/kube/services/wts/wts-deploy.yaml b/kube/services/wts/wts-deploy.yaml index dd24f8808..e54a9cfc4 100644 --- a/kube/services/wts/wts-deploy.yaml +++ b/kube/services/wts/wts-deploy.yaml @@ -132,11 +132,10 @@ spec: port: 80 resources: requests: - cpu: 0.8 - memory: 512Mi + cpu: 100m + memory: 200Mi limits: - cpu: 2 - memory: 2048Mi + memory: 512Mi initContainers: - name: wts-db-migrate GEN3_WTS_IMAGE From 4e31093a1ed180ae362234b69b3c983c2d5fdc29 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 20 Mar 2023 15:27:29 -0500 Subject: [PATCH 098/362] feat(gpu): added gpu support to karpenter (#2160) * feat(gpu): added gpu support to karpenter * feat(gpu): added gpu support to karpenter * 
feat(gpu): added gpu support to karpenter * feat(gpu): added gpu support to karpenter * feat(gpu): added gpu support to karpenter * feat(gpu): added gpu support to karpenter * feat(gpu): added gpu support to karpenter --------- Co-authored-by: Edward Malinowski --- files/squid_whitelist/web_whitelist | 1 + gen3/bin/kube-setup-karpenter.sh | 160 +++++++++++++----- .../karpenter/karpenter-global-settings.yaml | 7 + .../karpenter/nodeTemplateDefault.yaml | 9 +- kube/services/karpenter/nodeTemplateGPU.yaml | 57 +++++++ .../karpenter/nodeTemplateJupyter.yaml | 9 +- .../karpenter/nodeTemplateWorkflow.yaml | 9 +- kube/services/karpenter/nvdp.yaml | 33 ++++ kube/services/karpenter/provisionerGPU.yaml | 29 ++++ .../karpenter/provisionerGPUShared.yaml | 30 ++++ tf_files/aws/modules/eks/cloud.tf | 6 + 11 files changed, 304 insertions(+), 46 deletions(-) create mode 100644 kube/services/karpenter/karpenter-global-settings.yaml create mode 100644 kube/services/karpenter/nodeTemplateGPU.yaml create mode 100644 kube/services/karpenter/nvdp.yaml create mode 100644 kube/services/karpenter/provisionerGPU.yaml create mode 100644 kube/services/karpenter/provisionerGPUShared.yaml diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index fe0fcac8d..3a0b82a77 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -116,6 +116,7 @@ mran.microsoft.com neuro.debian.net neurodeb.pirsquared.org nginx.org +nvidia.github.io opportunityinsights.org orcid.org pgp.mit.edu diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 2f9d0a2c8..3688194ab 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -11,17 +11,21 @@ ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.na gen3_deploy_karpenter() { gen3_log_info "Deploying karpenter" # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy - if [[( -z 
$(g3kubectl get namespaces | grep karpenter) || $FORCE == "true" ) && ("$ctxNamespace" == "default" || "$ctxNamespace" == "null")]]; then + if [[ ( -z $(g3kubectl get namespaces | grep karpenter) || $FORCE == "true" ) && ("$ctxNamespace" == "default" || "$ctxNamespace" == "null") ]]; then gen3_log_info "Ensuring that the spot instance service linked role is setup" # Ensure the spot instance service linked role is setup # It is required for running spot instances + gen3_create_karpenter_sqs_eventbridge aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true + if g3k_config_lookup .global.karpenter_version; then + karpenter=$(g3k_config_lookup .global.karpenter_version) + fi export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` - if [ "${clusterversion}" = "24+" ]; then + if [ "${clusterversion}" = "24+" ]; then karpenter=${karpenter:-v0.24.0} - else + else karpenter=${karpenter:-v0.22.0} - fi + fi echo '{ "Statement": [ { @@ -48,6 +52,17 @@ gen3_deploy_karpenter() { "Resource": "*", "Sid": "Karpenter" }, + { + "Action": [ + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage" + ], + "Effect": "Allow", + "Resource": "arn:aws:sqs:*:'$(aws sts get-caller-identity --output text --query "Account")':karpenter-sqs-$(echo vpc_name)", + "Sid": "Karpenter2" + }, { "Action": "ec2:TerminateInstances", "Condition": { @@ -61,76 +76,132 @@ gen3_deploy_karpenter() { } ], "Version": "2012-10-17" - }' > controller-policy.json + }' > $XDG_RUNTIME_DIR/controller-policy.json gen3_log_info "Creating karpenter namespace" g3kubectl create namespace karpenter 2> /dev/null || true gen3_log_info "Creating karpenter AWS role and k8s service accounts" gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true + gen3 awsrole sa-annotate "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true # Have to delete SA because helm chart will create the SA and there 
will be a conflict gen3_log_info "Have to delete SA because helm chart will create the SA and there will be a conflict" - g3kubectl delete sa karpenter -n karpenter + #g3kubectl delete sa karpenter -n karpenter - - gen3_log_info "aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true" - aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true - + gen3_log_info "aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true" + aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true gen3_log_info "Need to tag the subnets/sg's so that karpenter can discover them automatically" # Need to tag the subnets/sg's so that karpenter can discover them automatically subnets=$(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_private_*' --query 'Subnets[].SubnetId' --output text) - security_groups=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' --output text) - security_groups_jupyter=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-jupyter,ssh_eks_'$vpc_name'-nodepool-jupyter' --query 'SecurityGroups[].GroupId' --output text) - security_groups_workflow=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-workflow,ssh_eks_'$vpc_name'-nodepool-workflow' --query 'SecurityGroups[].GroupId' --output text) + # Will apprend secondary CIDR block subnets to be 
tagged as well, and if none are found then will not append anything to list + subnets+=" $(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_secondary_cidr_subnet_*' --query 'Subnets[].SubnetId' --output text)" + security_groups=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' --output text) || true + security_groups_jupyter=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-jupyter,ssh_eks_'$vpc_name'-nodepool-jupyter' --query 'SecurityGroups[].GroupId' --output text) || true + security_groups_workflow=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-workflow,ssh_eks_'$vpc_name'-nodepool-workflow' --query 'SecurityGroups[].GroupId' --output text) || true cluster_endpoint="$(aws eks describe-cluster --name ${vpc_name} --query "cluster.endpoint" --output text)" - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-worfklow" --resources ${security_groups_workflow} - - + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} || true + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} || true + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} || true + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-worfklow" --resources ${security_groups_workflow} || true + echo '{ + "Version": "2012-10-17", + "Statement": [ + { + 
"Effect": "Allow", + "Condition": { + "ArnLike": { + "aws:SourceArn": "arn:aws:eks:us-east-1:707767160287:fargateprofile/$(echo $vpc_name)/*" + } + }, + "Principal": { + "Service": "eks-fargate-pods.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + }' > $XDG_RUNTIME_DIR/fargate-policy.json + aws iam create-role --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} --assume-role-policy-document file://"$XDG_RUNTIME_DIR/fargate-policy.json" || true + aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} || true + aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::707767160287:role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true gen3_log_info "Installing karpenter using helm" - - helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter \ + helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \ --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ --set settings.aws.clusterEndpoint="${cluster_endpoint}" \ --set settings.aws.clusterName=${vpc_name} \ - --set serviceAccount.annotations."eks\.amazonaws\.com/role-arn"="arn:aws:iam::$(aws sts get-caller-identity --output text --query 'Account'):role/gen3_service/karpenter-controller-role-${vpc_name}" - - gen3_log_info "sleep for a little bit so CRD's can be created for the provisioner/node template" - # sleep for a little bit so CRD's can be created for the provisioner/node template - sleep 10 - gen3_log_info "Deploy AWS node termination handler so that spot instances can be preemptively spun up before old instances stop" - # Deploy AWS node termination handler so that spot instances can be preemptively spun up 
before old instances stop - kubectl apply -f https://github.com/aws/aws-node-termination-handler/releases/download/v1.18.1/all-resources.yaml + --set serviceAccount.name=karpenter \ + --set serviceAccount.create=false \ + --set controller.env[0].name=AWS_REGION \ + --set controller.env[0].value=us-east-1 fi - gen3_log_info "Remove cluster-autoscaler" gen3 kube-setup-autoscaler --remove # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues gen3 kube-setup-fluentd --force - gen3_log_info "Adding node templates for karpenter" - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateWorkflow.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - - if [[ $ARM ]]; then - gen3_log_info "Deploy binfmt daemonset so the emulation tools run on arm nodes" - # Deploy binfmt daemonset so the emulation tools run on arm nodes - g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/binfmt.yaml - g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerArm.yaml + gen3_update_karpenter_configs +} + +gen3_update_karpenter_configs() { + # depoloy node templates and provisioners if not set in manifest + if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then + gen3_log_info "karpenter manifest found, skipping node template and provisioner deployment" + # apply each manifest in the karpenter folder + for manifest in $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter/*.yaml; do + g3k_kv_filter $manifest VPC_NAME ${vpc_name} | g3kubectl apply -f - + done else - g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerDefault.yaml + gen3_log_info "Adding node templates for karpenter" + g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateDefault.yaml 
VPC_NAME ${vpc_name} | g3kubectl apply -f - + g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateJupyter.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - + g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/nodeTemplateWorkflow.yaml VPC_NAME ${vpc_name} | g3kubectl apply -f - + if [[ $ARM ]]; then + gen3_log_info "Deploy binfmt daemonset so the emulation tools run on arm nodes" + # Deploy binfmt daemonset so the emulation tools run on arm nodes + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/binfmt.yaml + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerArm.yaml + else + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerDefault.yaml + fi + if [[ $GPU ]]; then + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerGPU.yaml + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerGPUShared.yaml + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/nodeTemplateGPU.yaml + helm repo add nvdp https://nvidia.github.io/k8s-device-plugin + helm repo update + helm upgrade -i nvdp nvdp/nvidia-device-plugin \ + --namespace nvidia-device-plugin \ + --create-namespace -f ${GEN3_HOME}/kube/services/karpenter/nvdp.yaml + fi + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerJupyter.yaml + g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerWorkflow.yaml fi - g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerJupyter.yaml - g3kubectl apply -f ${GEN3_HOME}/kube/services/karpenter/provisionerWorkflow.yaml +} + +gen3_create_karpenter_sqs_eventbridge() { + local queue_name="karpenter-sqs-${vpc_name}" + local eventbridge_rule_name="karpenter-eventbridge-${vpc_name}" + gen3 sqs create-queue-if-not-exist $queue_name >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" + local queue_url=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.url') + local queue_arn=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.arn') + # Create eventbridge rules + aws 
events put-rule --name "Karpenter-${vpc_name}-SpotInterruptionRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Spot Instance Interruption Warning"]}' 2> /dev/null + aws events put-rule --name "Karpenter-${vpc_name}-RebalanceRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Instance Rebalance Recommendation"]}' 2> /dev/null + aws events put-rule --name "Karpenter-${vpc_name}-ScheduledChangeRule" --event-pattern '{"source": ["aws.health"], "detail-type": ["AWS Health Event"]}' 2> /dev/null + aws events put-rule --name "Karpenter-${vpc_name}-InstanceStateChangeRule" --event-pattern '{"source": ["aws.ec2"], "detail-type": ["EC2 Instance State-change Notification"]}' 2> /dev/null + # Add SQS as a target for the eventbridge rules + aws events put-targets --rule "Karpenter-${vpc_name}-SpotInterruptionRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true + aws events put-targets --rule "Karpenter-${vpc_name}-RebalanceRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true + aws events put-targets --rule "Karpenter-${vpc_name}-ScheduledChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true + aws events put-targets --rule "Karpenter-${vpc_name}-InstanceStateChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true + aws sqs set-queue-attributes --queue-url "${queue_url}" --attributes "Policy"="$(aws sqs get-queue-attributes --queue-url "${queue_url}" --attribute-names "Policy" --query "Attributes.Policy" --output text | jq -r '.Statement += [{"Sid": "AllowKarpenter", "Effect": "Allow", "Principal": {"Service": ["sqs.amazonaws.com","events.amazonaws.com"]}, "Action": "sqs:SendMessage", "Resource": "'${queue_arn}'"}]')" 2> /dev/null || true + g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/karpenter-global-settings.yaml SQS_NAME ${queue_name} | g3kubectl apply -f - } gen3_remove_karpenter() { aws iam delete-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-name 
"karpenter-controller-policy" 1>&2 || true aws iam delete-role --role-name "karpenter-controller-role-$vpc_name" - helm uninstall karpenter -n karpenter + helm uninstall karpenter -n karpenter g3kubectl delete namespace karpenter gen3 kube-setup-autoscaler } @@ -162,6 +233,9 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then "remove") gen3_remove_karpenter ;; + "update") + gen3_update_karpenter_configs + ;; *) gen3_deploy_karpenter ;; diff --git a/kube/services/karpenter/karpenter-global-settings.yaml b/kube/services/karpenter/karpenter-global-settings.yaml new file mode 100644 index 000000000..4c09a465d --- /dev/null +++ b/kube/services/karpenter/karpenter-global-settings.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: karpenter-global-settings + namespace: karpenter +data: + aws.interruptionQueueName: SQS_NAME \ No newline at end of file diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index 2026f8dfa..0f76a392f 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -11,6 +11,11 @@ spec: karpenter.sh/discovery: VPC_NAME Environment: VPC_NAME Name: eks-VPC_NAME-karpenter + metadataOptions: + httpEndpoint: enabled + httpProtocolIPv6: disabled + httpPutResponseHopLimit: 2 + httpTokens: optional userData: | MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="BOUNDARY" @@ -19,7 +24,9 @@ spec: Content-Type: text/x-shellscript; charset="us-ascii" #!/bin/bash -xe - + instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> 
/home/ec2-user/.ssh/authorized_keys sysctl -w fs.inotify.max_user_watches=12000 diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml new file mode 100644 index 000000000..b41e6441c --- /dev/null +++ b/kube/services/karpenter/nodeTemplateGPU.yaml @@ -0,0 +1,57 @@ +apiVersion: karpenter.k8s.aws/v1alpha1 +kind: AWSNodeTemplate +metadata: + name: gpu +spec: + subnetSelector: + karpenter.sh/discovery: VPC_NAME + securityGroupSelector: + karpenter.sh/discovery: VPC_NAME-gpu + tags: + Environment: VPC_NAME + Name: eks-VPC_NAME-gpu-karpenter + karpenter.sh/discovery: VPC_NAME + metadataOptions: + httpEndpoint: enabled + httpProtocolIPv6: disabled + httpPutResponseHopLimit: 2 + httpTokens: optional + userData: | + MIME-Version: 1.0 + Content-Type: multipart/mixed; boundary="BOUNDARY" + + --BOUNDARY + Content-Type: text/x-shellscript; charset="us-ascii" + + #!/bin/bash -xe + instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + sysctl -w fs.inotify.max_user_watches=12000 + + sudo yum update -y + sudo yum install -y dracut-fips openssl >> /opt/fips-install.log + sudo dracut -f + # configure grub + sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" + + power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + + --BOUNDARY-- + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + volumeSize: 200Gi + volumeType: gp2 + encrypted: true + deleteOnTermination: true diff --git 
a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml index 629eac24e..579ac1aa3 100644 --- a/kube/services/karpenter/nodeTemplateJupyter.yaml +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -11,6 +11,11 @@ spec: Environment: VPC_NAME Name: eks-VPC_NAME-jupyter-karpenter karpenter.sh/discovery: VPC_NAME + metadataOptions: + httpEndpoint: enabled + httpProtocolIPv6: disabled + httpPutResponseHopLimit: 2 + httpTokens: optional userData: | MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="BOUNDARY" @@ -19,7 +24,9 @@ spec: Content-Type: text/x-shellscript; charset="us-ascii" #!/bin/bash -xe - + instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys sysctl -w fs.inotify.max_user_watches=12000 diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml index 7e0cbf481..60481b4fc 100644 --- a/kube/services/karpenter/nodeTemplateWorkflow.yaml +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -11,6 +11,11 @@ spec: Environment: VPC_NAME Name: eks-VPC_NAME-workflow-karpenter karpenter.sh/discovery: VPC_NAME + metadataOptions: + httpEndpoint: enabled + httpProtocolIPv6: disabled + httpPutResponseHopLimit: 2 + httpTokens: optional userData: | MIME-Version: 1.0 Content-Type: multipart/mixed; boundary="BOUNDARY" @@ -19,7 +24,9 @@ spec: Content-Type: text/x-shellscript; charset="us-ascii" #!/bin/bash -xe - + instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) + curl 
https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys sysctl -w fs.inotify.max_user_watches=12000 diff --git a/kube/services/karpenter/nvdp.yaml b/kube/services/karpenter/nvdp.yaml new file mode 100644 index 000000000..4c37a9c27 --- /dev/null +++ b/kube/services/karpenter/nvdp.yaml @@ -0,0 +1,33 @@ +config: + # ConfigMap name if pulling from an external ConfigMap + name: "" + # Set of named configs to build an integrated ConfigMap from + map: + default: |- + version: v1 + flags: + migStrategy: "none" + failOnInitError: true + nvidiaDriverRoot: "/" + plugin: + passDeviceSpecs: false + deviceListStrategy: envvar + deviceIDStrategy: uuid + shared_gpu: |- + version: v1 + flags: + migStrategy: "none" + failOnInitError: true + nvidiaDriverRoot: "/" + plugin: + passDeviceSpecs: false + deviceListStrategy: envvar + deviceIDStrategy: uuid + sharing: + timeSlicing: + renameByDefault: false + resources: + - name: nvidia.com/gpu + replicas: 10 +nodeSelector: + jina.ai/gpu-type: nvidia \ No newline at end of file diff --git a/kube/services/karpenter/provisionerGPU.yaml b/kube/services/karpenter/provisionerGPU.yaml new file mode 100644 index 000000000..77a6b3876 --- /dev/null +++ b/kube/services/karpenter/provisionerGPU.yaml @@ -0,0 +1,29 @@ +apiVersion: karpenter.sh/v1alpha5 +kind: Provisioner +metadata: + name: gpu +spec: + ttlSecondsAfterEmpty: 300 + labels: + jina.ai/node-type: gpu + jina.ai/gpu-type: nvidia + requirements: + - key: node.kubernetes.io/instance-type + operator: In + values: ["g4dn.xlarge", "g4dn.2xlarge", "g4dn.4xlarge", "g4dn.12xlarge"] + - key: karpenter.sh/capacity-type + operator: In + values: ["spot", "on-demand"] + - key: kubernetes.io/arch + 
operator: In + values: ["amd64"] + taints: + - key: nvidia.com/gpu + effect: "NoSchedule" + limits: + resources: + cpu: 1000 + # Use the default node template + providerRef: + name: gpu + ttlSecondsAfterEmpty: 30 diff --git a/kube/services/karpenter/provisionerGPUShared.yaml b/kube/services/karpenter/provisionerGPUShared.yaml new file mode 100644 index 000000000..fa108c512 --- /dev/null +++ b/kube/services/karpenter/provisionerGPUShared.yaml @@ -0,0 +1,30 @@ +apiVersion: karpenter.sh/v1alpha5 +kind: Provisioner +metadata: + name: gpu-shared +spec: + ttlSecondsAfterEmpty: 300 + labels: + jina.ai/node-type: gpu-shared + jina.ai/gpu-type: nvidia + nvidia.com/device-plugin.config: shared_gpu + requirements: + - key: karpenter.k8s.aws/instance-family + operator: In + values: ["g4dn", "g5","p4","p3"] + - key: karpenter.sh/capacity-type + operator: In + values: ["spot", "on-demand"] + - key: kubernetes.io/arch + operator: In + values: ["amd64"] + taints: + - key: nvidia.com/gpu-shared + effect: "NoSchedule" + limits: + resources: + cpu: 1000 + # Use the default node template + providerRef: + name: gpu + ttlSecondsAfterEmpty: 30 diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf index c1a582dfe..784196c5a 100644 --- a/tf_files/aws/modules/eks/cloud.tf +++ b/tf_files/aws/modules/eks/cloud.tf @@ -398,6 +398,12 @@ resource "aws_iam_policy" "asg_access" { "autoscaling:DescribeLaunchConfigurations" ], "Resource": "*" + }, + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "ec2:CreateTags", + "Resource": "arn:aws:ec2:*:${data.aws_caller_identity.current.account_id}:instance/*" } ] } From 35b617dfd45cf35e8459c3b73e7a377d35d83b5d Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Tue, 21 Mar 2023 14:54:07 -0500 Subject: [PATCH 099/362] cedar ingestion revproxy (#2186) --- files/scripts/healdata/heal-cedar-data-ingest.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index f571f33e4..79f5c7eb8 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -81,7 +81,7 @@ def update_filter_metadata(metadata_to_update): # Get the metadata from cedar to register print("Querying CEDAR...") -cedar = requests.get(f"https://{hostname}/cedar/get-instance-by-directory/{dir_id}", headers=token_header) +cedar = requests.get(f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}", headers=token_header) # If we get metadata back now register with MDS if cedar.status_code == 200: @@ -98,7 +98,7 @@ def update_filter_metadata(metadata_to_update): cedar_record_id = str(cedar_record["appl_id"]) # Get the metadata record for the nih_application_id - mds = requests.get(f"https://{hostname}/mds/metadata/{cedar_record_id}", + mds = requests.get(f"http://revproxy-service/mds/metadata/{cedar_record_id}", headers=token_header ) if mds.status_code == 200: @@ -116,7 +116,7 @@ def update_filter_metadata(metadata_to_update): mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" print("Metadata is now being registered.") - mds_put = requests.put(f"https://{hostname}/mds/metadata/{cedar_record_id}", + mds_put = requests.put(f"http://revproxy-service/mds/metadata/{cedar_record_id}", headers=token_header, json = mds_cedar_register_data_body ) From 05c2cab8cd2d415dfd11b1d20ec2d32049400672 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Fri, 24 Mar 2023 14:49:40 -0500 Subject: [PATCH 100/362] fix: incorrect resource specification for cohort-middleware (#2189) --- kube/services/cohort-middleware/cohort-middleware-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index db906af35..602924d26 100644 --- 
a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -118,6 +118,6 @@ spec: resources: requests: cpu: 100m - memory: 128mi + memory: 128Mi limits: memory: 4Gi From b19e3e8c6f7200e649375a23f4e6281a524ba912 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 30 Mar 2023 09:46:48 -0600 Subject: [PATCH 101/362] using the fips enabled ami for the jupyter and workflow nodepools (#2191) --- tf_files/aws/modules/eks-nodepool/cloud.tf | 2 +- tf_files/aws/modules/eks-nodepool/data.tf | 15 --------------- tf_files/aws/modules/eks-nodepool/variables.tf | 7 ++++++- tf_files/aws/modules/eks/cloud.tf | 2 ++ 4 files changed, 9 insertions(+), 17 deletions(-) diff --git a/tf_files/aws/modules/eks-nodepool/cloud.tf b/tf_files/aws/modules/eks-nodepool/cloud.tf index 3001ce9a5..589b9a429 100644 --- a/tf_files/aws/modules/eks-nodepool/cloud.tf +++ b/tf_files/aws/modules/eks-nodepool/cloud.tf @@ -273,7 +273,7 @@ resource "aws_security_group_rule" "nodes_interpool_communications" { resource "aws_launch_configuration" "eks_launch_configuration" { associate_public_ip_address = false iam_instance_profile = "${aws_iam_instance_profile.eks_node_instance_profile.name}" - image_id = "${data.aws_ami.eks_worker.id}" + image_id = "${var.fips_enabled_ami}" instance_type = "${var.nodepool_instance_type}" name_prefix = "eks-${var.vpc_name}-nodepool-${var.nodepool}" security_groups = ["${aws_security_group.eks_nodes_sg.id}", "${aws_security_group.ssh.id}"] diff --git a/tf_files/aws/modules/eks-nodepool/data.tf b/tf_files/aws/modules/eks-nodepool/data.tf index a695b84fd..3d1df3ee0 100644 --- a/tf_files/aws/modules/eks-nodepool/data.tf +++ b/tf_files/aws/modules/eks-nodepool/data.tf @@ -22,21 +22,6 @@ data "aws_availability_zones" "available" { state = "available" } - -# First, let us create a data source to fetch the latest Amazon Machine Image (AMI) that Amazon 
provides with an -# EKS compatible Kubernetes baked in. - -data "aws_ami" "eks_worker" { - filter { - name = "name" - # values = ["${var.eks_version == "1.10" ? "amazon-eks-node-1.10*" : "amazon-eks-node-1.11*"}"] - values = ["amazon-eks-node-${var.eks_version}*"] - } - - most_recent = true - owners = ["602401143452"] # Amazon Account ID -} - #data "aws_eks_cluster" "eks_cluster" { # name = "${var.vpc_name}" #} diff --git a/tf_files/aws/modules/eks-nodepool/variables.tf b/tf_files/aws/modules/eks-nodepool/variables.tf index 401866f6d..944d56623 100644 --- a/tf_files/aws/modules/eks-nodepool/variables.tf +++ b/tf_files/aws/modules/eks-nodepool/variables.tf @@ -79,4 +79,9 @@ variable "activation_id" { variable "customer_id" { default = "" -} \ No newline at end of file +} + +# This is the FIPS enabled AMI in cdistest account. +variable "fips_enabled_ami" { + default = "ami-074d352c8e753fc93" +} diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf index 784196c5a..517606a8d 100644 --- a/tf_files/aws/modules/eks/cloud.tf +++ b/tf_files/aws/modules/eks/cloud.tf @@ -37,6 +37,7 @@ module "jupyter_pool" { nodepool_asg_min_size = "${var.jupyter_asg_min_size}" activation_id = "${var.activation_id}" customer_id = "${var.customer_id}" + fips_enabled_ami = "${local.ami}" } module "workflow_pool" { @@ -62,6 +63,7 @@ module "workflow_pool" { nodepool_asg_min_size = "${var.workflow_asg_min_size}" activation_id = "${var.activation_id}" customer_id = "${var.customer_id}" + fips_enabled_ami = "${local.ami}" } From 51651af0563d793fd54f22df8a8da4c786ab289f Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 30 Mar 2023 11:19:14 -0500 Subject: [PATCH 102/362] fix(karpenter): Removed hardcoded values (#2192) * fix(karpenter): Removed hardcoded values * fix(karpenter): Removed hardcoded values --------- Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-karpenter.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git 
a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 3688194ab..e997fe3bf 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -60,7 +60,7 @@ gen3_deploy_karpenter() { "sqs:ReceiveMessage" ], "Effect": "Allow", - "Resource": "arn:aws:sqs:*:'$(aws sts get-caller-identity --output text --query "Account")':karpenter-sqs-$(echo vpc_name)", + "Resource": "arn:aws:sqs:*:'$(aws sts get-caller-identity --output text --query "Account")':karpenter-sqs-'$(echo vpc_name)'", "Sid": "Karpenter2" }, { @@ -83,7 +83,7 @@ gen3_deploy_karpenter() { gen3_log_info "Creating karpenter AWS role and k8s service accounts" gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true - gen3 awsrole sa-annotate "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true + gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter || true # Have to delete SA because helm chart will create the SA and there will be a conflict gen3_log_info "Have to delete SA because helm chart will create the SA and there will be a conflict" @@ -112,7 +112,7 @@ gen3_deploy_karpenter() { "Effect": "Allow", "Condition": { "ArnLike": { - "aws:SourceArn": "arn:aws:eks:us-east-1:707767160287:fargateprofile/$(echo $vpc_name)/*" + "aws:SourceArn": "arn:aws:eks:us-east-1:'$(aws sts get-caller-identity --output text --query "Account")':fargateprofile/'$(echo $vpc_name)'/*" } }, "Principal": { @@ -124,7 +124,9 @@ gen3_deploy_karpenter() { }' > $XDG_RUNTIME_DIR/fargate-policy.json aws iam create-role --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} --assume-role-policy-document file://"$XDG_RUNTIME_DIR/fargate-policy.json" || true aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} || true - aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name 
--pod-execution-role-arn arn:aws:iam::707767160287:role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true + # Wait for IAM changes to take effect + sleep 15 + aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::$(aws sts get-caller-identity --output text --query "Account"):role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true gen3_log_info "Installing karpenter using helm" helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \ --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ From 841573ad446b0c63742b6bb120cde42d6ca4e83d Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 5 Apr 2023 12:52:45 -0600 Subject: [PATCH 103/362] changing "parallelism" parameter from 10 to 3 per GPE-876 (#2202) --- kube/services/argo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 329b058d7..08b85fa6d 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,5 +1,5 @@ controller: - parallelism: 10 + parallelism: 3 metricsConfig: # -- Enables prometheus metrics server enabled: true From b6de19f5634bb28cba219bfae58bb5b812eec70b Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 6 Apr 2023 14:08:32 -0500 Subject: [PATCH 104/362] chore(portal-requests): Upped portal requests to get around crashloops (#2207) Co-authored-by: Edward Malinowski --- kube/services/portal/portal-deploy.yaml | 4 ++-- kube/services/portal/portal-root-deploy.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/kube/services/portal/portal-deploy.yaml b/kube/services/portal/portal-deploy.yaml index 8087efbdc..742f1b71c 
100644 --- a/kube/services/portal/portal-deploy.yaml +++ b/kube/services/portal/portal-deploy.yaml @@ -87,8 +87,8 @@ spec: failureThreshold: 30 resources: requests: - cpu: 1 - memory: 1Gi + cpu: 2 + memory: 3Gi ports: - containerPort: 80 - containerPort: 443 diff --git a/kube/services/portal/portal-root-deploy.yaml b/kube/services/portal/portal-root-deploy.yaml index 4d67268bb..f639a1e15 100644 --- a/kube/services/portal/portal-root-deploy.yaml +++ b/kube/services/portal/portal-root-deploy.yaml @@ -87,8 +87,8 @@ spec: failureThreshold: 10 resources: requests: - cpu: 1 - memory: 1Gi + cpu: 2 + memory: 3Gi ports: - containerPort: 80 - containerPort: 443 From d378addf0e9d6726597d26cd40da58035f20651f Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Thu, 6 Apr 2023 15:13:11 -0500 Subject: [PATCH 105/362] Update kube-setup-hatchery.sh (#2183) * Update kube-setup-hatchery.sh * Update kube-setup-hatchery.sh --- gen3/bin/kube-setup-hatchery.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index 1192c293e..07172aa1e 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -94,4 +94,4 @@ fi g3kubectl apply -f "${GEN3_HOME}/kube/services/hatchery/hatchery-service.yaml" gen3 roll hatchery -gen3 job cron hatchery-reaper '@daily' \ No newline at end of file +gen3 job cron hatchery-reaper "*/5 * * * *" From 8337fb7300278052afc84df9718c210c3d75b645 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 6 Apr 2023 16:03:42 -0700 Subject: [PATCH 106/362] Set ephemeral-storage for jenkins-ci-worker (#2208) * Set ephemeral-storage for jenkins-ci-worker * Update jenkins-ci-worker-deploy.yaml --- kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml index 
9184cd336..4167df364 100644 --- a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml +++ b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml @@ -119,6 +119,7 @@ spec: limits: cpu: 0.9 memory: 4096Mi + ephemeral-storage: 100Mi imagePullPolicy: Always volumeMounts: - name: "cert-volume" From 4404a552a3099897b024c26b47d9f653c4e5ab08 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 7 Apr 2023 10:11:06 -0700 Subject: [PATCH 107/362] increase ephemeral storage on jenkins-ci-worker (#2209) --- kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml index 4167df364..466e4a7df 100644 --- a/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml +++ b/kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml @@ -119,7 +119,7 @@ spec: limits: cpu: 0.9 memory: 4096Mi - ephemeral-storage: 100Mi + ephemeral-storage: 500Mi imagePullPolicy: Always volumeMounts: - name: "cert-volume" From 91e9a7eea0ccbe279c240479b4270782dd8f9580 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 12 Apr 2023 13:19:01 -0500 Subject: [PATCH 108/362] Update hatchery-reaper job (#2214) --- kube/services/jobs/hatchery-reaper-job.yaml | 35 ++++++++++++++------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/kube/services/jobs/hatchery-reaper-job.yaml b/kube/services/jobs/hatchery-reaper-job.yaml index ec615c0d7..77d249e37 100644 --- a/kube/services/jobs/hatchery-reaper-job.yaml +++ b/kube/services/jobs/hatchery-reaper-job.yaml @@ -60,7 +60,8 @@ spec: source "$GEN3_HOME/gen3/gen3setup.sh" # 60 minute idle timeout max limit=3600 - remote_users=$(kubectl get svc -n jupyter-pods -o json | jq -r . 
| jq -r '.items[].metadata.annotations."getambassador.io/config"' | yq -r .headers.remote_user) + namespace=$(gen3 jupyter j-namespace) + remote_users=$(kubectl get svc -n $namespace -o json | jq -r . | jq -r '.items[].metadata.annotations."getambassador.io/config"' | yq -r .headers.remote_user) # helper function to construct service name function escape() { @@ -79,26 +80,38 @@ spec: echo $retString } - for user in $remote_users; do - echo $user + for user in $remote_users; do + gen3_log_info "Checking possible workspaces to reap for $user" status=$(curl -s -H "REMOTE_USER: $user" hatchery-service/status | jq -r .status) - if [[ $status == "Running" ]]; then - echo "$user has workspace that is $status" - serviceName=h-$(escape $user)-s - service=$(kubectl get svc -n jupyter-pods $serviceName -o json | jq -r '.metadata.annotations."getambassador.io/config"' | yq -r .service) - last_activity=$(curl -s -H "REMOTE_USER: $user" $service/lw-workspace/proxy/api/status | jq -r .last_activity ) + if [ "$status" == "Running" ] || [ "$status" == "Launching" ]; then + gen3_log_info "$user has workspace that is $status" + serviceName=h-$(escape $user)-s + service="ambassador-service" + status_code=$(curl -s -w '%{http_code}' -o status.json -H "REMOTE_USER: $user" $service/api/status) + if [ "$status_code" == "200" ]; then + last_activity=$(curl -s -H "REMOTE_USER: $user" $service/api/status | jq -r .last_activity ) now=$(date +%s) delta=$(expr $now - $(date -d "$last_activity" +%s)) - echo Workspace for $user has been idle for $delta seconds + gen3_log_info "Workspace for $user has been idle for $delta seconds" if [ "$delta" -gt "$limit" ]; then - echo "Workspace for $user has been running for $delta seconds, which is higher than the $limit... Terminating" + gen3_log_info "Workspace for $user has been running for $delta seconds, which is higher than the $limit... 
Terminating" curl -XPOST -s -H "REMOTE_USER: $user" hatchery-service/terminate fi + else + gen3_log_err "Error: Got HTTP status $status_code trying to get last_activity for $user. Not able to reap workspace" + fi + gen3_log_info "Checking if paymodel for $user is above limit" + pm_status=$(curl -s -H "REMOTE_USER: $user" hatchery-service/paymodels | jq -r .request_status) + if [ "$pm_status" == "above limit" ]; then + gen3_log_warn "Paymodel status is above limit for user: $user. Reaping the workspace" + curl -XPOST -s -H "REMOTE_USER: $user" hatchery-service/terminate + fi fi done # legacy reaper code + gen3_log_info "Running legacy reaper job (based on local cluster/ prometheus)" if appList="$(gen3 jupyter idle none "$(gen3 db namespace)" kill)" && [[ -n "$appList" && -n "$slackWebHook" && "$slackWebHook" != "None" ]]; then curl -X POST --data-urlencode "payload={\"text\": \"hatchery-reaper in $gen3Hostname: \n\`\`\`\n${appList}\n\`\`\`\"}" "${slackWebHook}" fi - echo "All Done!" \ No newline at end of file + gen3_log_info "All Done!" 
\ No newline at end of file From f0bbef857906976601d7f81922f1c93fb4920396 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 13 Apr 2023 08:45:37 -0700 Subject: [PATCH 109/362] Update jenkins-deploy.yaml (#2215) --- kube/services/jenkins/jenkins-deploy.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml index 89be7ec5b..c0eae2040 100644 --- a/kube/services/jenkins/jenkins-deploy.yaml +++ b/kube/services/jenkins/jenkins-deploy.yaml @@ -111,8 +111,8 @@ spec: port: 8080 resources: limits: - cpu: 0.9 - memory: 8192Mi + cpu: 2 + memory: 6Gi imagePullPolicy: Always volumeMounts: - name: datadir From ef80e4e14d13c06dc02a2a19c0ebeb5c749237b2 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 13 Apr 2023 14:02:13 -0500 Subject: [PATCH 110/362] chore(dd-workflow-removal): Removed DD from workflow nodes (#2216) Co-authored-by: Edward Malinowski --- kube/services/datadog/values.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 6df70d0ec..ce662b6a1 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -274,10 +274,6 @@ agents: # agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) tolerations: - - effect: NoSchedule - key: role - operator: Equal - value: workflow - effect: NoSchedule key: role operator: Equal From b1fc0a4b63932c5494576f0e36af1be39c460c96 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 14 Apr 2023 14:13:24 -0600 Subject: [PATCH 111/362] updating the kube-setup-sheepdog script so it can grant access to the new qa peregrine users for the devplanetv2 migration. 
(#2217) --- gen3/bin/kube-setup-sheepdog.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-sheepdog.sh b/gen3/bin/kube-setup-sheepdog.sh index b72d36690..7eec86def 100644 --- a/gen3/bin/kube-setup-sheepdog.sh +++ b/gen3/bin/kube-setup-sheepdog.sh @@ -42,8 +42,8 @@ if [[ -z "$JENKINS_HOME" && -f "$(gen3_secrets_folder)/creds.json" ]]; then if gen3_time_since postgres_checkup is 120; then # Grant permissions to peregrine sqlList=( - "GRANT SELECT ON ALL TABLES IN SCHEMA public TO $peregrine_db_user;" - "ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO $peregrine_db_user;" + "GRANT SELECT ON ALL TABLES IN SCHEMA public TO \"$peregrine_db_user\";" + "ALTER DEFAULT PRIVILEGES GRANT SELECT ON TABLES TO \"$peregrine_db_user\";" ); for sql in "${sqlList[@]}"; do gen3_log_info "Running: $sql" From 6e64d4ec2f2ad676b26e28a07d72a10112e58edf Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 17 Apr 2023 09:01:16 -0600 Subject: [PATCH 112/362] increasing the requets and limits for Fence (#2206) --- gen3/lib/testData/default/expectedFenceResult.yaml | 7 ++++--- .../testData/test1.manifest.g3k/expectedFenceResult.yaml | 7 ++++--- kube/services/fence/fence-deploy.yaml | 7 ++++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml index ddc21f0a5..f6d76d790 100644 --- a/gen3/lib/testData/default/expectedFenceResult.yaml +++ b/gen3/lib/testData/default/expectedFenceResult.yaml @@ -201,10 +201,11 @@ spec: subPath: "jwt-keys.tar" resources: requests: - cpu: 100m - memory: 500Mi + cpu: 0.4 + memory: 1200Mi limits: - memory: 1024Mi + cpu: 1.0 + memory: 2400Mi command: ["/bin/bash"] args: - "-c" diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index 90a329c84..d4196c070 100644 --- 
a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -233,10 +233,11 @@ spec: subPath: "jwt-keys.tar" resources: requests: - cpu: 100m - memory: 500Mi + cpu: 0.4 + memory: 1200Mi limits: - memory: 1024Mi + cpu: 1.0 + memory: 2400Mi command: ["/bin/bash"] args: - "-c" diff --git a/kube/services/fence/fence-deploy.yaml b/kube/services/fence/fence-deploy.yaml index 2ceb68ee7..1722676e0 100644 --- a/kube/services/fence/fence-deploy.yaml +++ b/kube/services/fence/fence-deploy.yaml @@ -240,10 +240,11 @@ spec: subPath: "jwt-keys.tar" resources: requests: - cpu: 100m - memory: 500Mi + cpu: 0.4 + memory: 1200Mi limits: - memory: 1024Mi + cpu: 1.0 + memory: 2400Mi command: ["/bin/bash"] args: - "-c" From cc2ee4a57fe86db2e8ce18de7b40ea1758ba926f Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 17 Apr 2023 09:32:16 -0600 Subject: [PATCH 113/362] Feat/karpenter fix (#2200) * re-creating the service account after setting up karpenter to ensure it is not removed. 
* forgot to remove "|| true" --- gen3/bin/kube-setup-karpenter.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index e997fe3bf..113f64cdf 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -137,6 +137,7 @@ gen3_deploy_karpenter() { --set controller.env[0].name=AWS_REGION \ --set controller.env[0].value=us-east-1 fi + gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter gen3_log_info "Remove cluster-autoscaler" gen3 kube-setup-autoscaler --remove # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues From 35aeabd024966076eae78616ebf5512624587140 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 17 Apr 2023 12:26:11 -0400 Subject: [PATCH 114/362] Feat/vhdc prod workflows scaleup (#2218) * Added changes made to in vhdcperf to promote stability while running Argo workflows. * Needed to modify the command so it didn't just endlessly add the arguments. Now, it checks if they exist in the cluster-autoscaler deployment, and only adds them if they don't have them. 
--- ...be-setup-autoscaler-for-large-workflows.sh | 26 +++++++++++++++++++ kube/services/argo/values.yaml | 6 ++++- 2 files changed, 31 insertions(+), 1 deletion(-) create mode 100644 gen3/bin/kube-setup-autoscaler-for-large-workflows.sh diff --git a/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh b/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh new file mode 100644 index 000000000..5bf4df8b7 --- /dev/null +++ b/gen3/bin/kube-setup-autoscaler-for-large-workflows.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Set the resources block for the deployment +kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/resources", "value": {"limits":{"cpu":"6","memory":"30Gi"},"requests":{"cpu":"1","memory":"4Gi"}}}]' + +# Add options to the command for the container, if they are not already present +if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scale-down-delay-after-delete=2m'; then + kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scale-down-delay-after-delete=2m"}]' +else + echo "Flag --scale-down-delay-after-delete=2m already present" +fi + +if ! kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scale-down-unneeded-time=2m'; then + kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scale-down-unneeded-time=2m"}]' +else + echo "Flag --scale-down-unneeded-time=2m already present" +fi + +if ! 
kubectl get deployment cluster-autoscaler -n kube-system -o jsonpath='{.spec.template.spec.containers[0].command}' | yq eval '.[]' | grep -q -- '--scan-interval=60s'; then + kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--scan-interval=60s"}]' +else + echo "Flag --scan-interval=60s already present" +fi + +# Add PriorityClass to the pod +kubectl patch deployment cluster-autoscaler -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/priorityClassName", "value": "system-node-critical"}]' diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 08b85fa6d..7c9ee3270 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -22,7 +22,11 @@ controller: } ] } - } + } + + resourceRateLimit: + limit: 40 + burst: 4 # -- enable persistence using postgres persistence: From fa37e1375a906acea10f175440cd00d89e05a135 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 17 Apr 2023 12:44:18 -0500 Subject: [PATCH 115/362] Update Jenkinsfile (#2219) * Update Jenkinsfile * Update Jenkinsfile --- Jenkinsfile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index c2d149681..7f70aeedf 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -43,6 +43,7 @@ metadata: app: ephemeral-ci-run netnolimit: "yes" annotations: + karpenter.sh/do-not-evict: true "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" spec: affinity: @@ -54,6 +55,11 @@ spec: operator: In values: - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand containers: - name: shell image: quay.io/cdis/gen3-ci-worker:master From d27166375b06fa52a5b71dbdc788d445bfb6e0c0 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 18 Apr 2023 10:21:40 -0500 Subject: [PATCH 116/362] fix(karpenter): Updated kube-setup-karpenter to only update configs in default namespace (#2220) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-karpenter.sh | 259 ++++++++++++++++--------------- 1 file changed, 131 insertions(+), 128 deletions(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 113f64cdf..3118586cb 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -9,140 +9,143 @@ ctx="$(g3kubectl config current-context)" ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" gen3_deploy_karpenter() { - gen3_log_info "Deploying karpenter" - # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy - if [[ ( -z $(g3kubectl get namespaces | grep karpenter) || $FORCE == "true" ) && ("$ctxNamespace" == "default" || "$ctxNamespace" == "null") ]]; then - gen3_log_info "Ensuring that the spot instance service linked role is setup" - # Ensure the spot instance service linked role is setup - # It 
is required for running spot instances - gen3_create_karpenter_sqs_eventbridge - aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true - if g3k_config_lookup .global.karpenter_version; then - karpenter=$(g3k_config_lookup .global.karpenter_version) - fi - export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` - if [ "${clusterversion}" = "24+" ]; then - karpenter=${karpenter:-v0.24.0} - else - karpenter=${karpenter:-v0.22.0} - fi - echo '{ - "Statement": [ - { - "Action": [ - "ssm:GetParameter", - "iam:PassRole", - "ec2:DescribeImages", - "ec2:RunInstances", - "ec2:DescribeSubnets", - "ec2:DescribeSecurityGroups", - "ec2:DescribeLaunchTemplates", - "ec2:DescribeInstances", - "ec2:DescribeInstanceTypes", - "ec2:DescribeInstanceTypeOfferings", - "ec2:DescribeAvailabilityZones", - "ec2:DeleteLaunchTemplate", - "ec2:CreateTags", - "ec2:CreateLaunchTemplate", - "ec2:CreateFleet", - "ec2:DescribeSpotPriceHistory", - "pricing:GetProducts" - ], - "Effect": "Allow", - "Resource": "*", - "Sid": "Karpenter" - }, - { - "Action": [ - "sqs:DeleteMessage", - "sqs:GetQueueAttributes", - "sqs:GetQueueUrl", - "sqs:ReceiveMessage" - ], - "Effect": "Allow", - "Resource": "arn:aws:sqs:*:'$(aws sts get-caller-identity --output text --query "Account")':karpenter-sqs-'$(echo vpc_name)'", - "Sid": "Karpenter2" - }, - { - "Action": "ec2:TerminateInstances", - "Condition": { - "StringLike": { - "ec2:ResourceTag/Name": "*karpenter*" - } - }, - "Effect": "Allow", - "Resource": "*", - "Sid": "ConditionalEC2Termination" - } - ], - "Version": "2012-10-17" - }' > $XDG_RUNTIME_DIR/controller-policy.json + # Only do cluster level changes in the default namespace to prevent conflicts + if [[ ("$ctxNamespace" == "default" || "$ctxNamespace" == "null") ]]; then + gen3_log_info "Deploying karpenter" + # If the karpenter namespace doesn't exist or the force flag isn't in place then deploy + if [[ ( -z $(g3kubectl get namespaces | grep karpenter) || 
$FORCE == "true" ) ]]; then + gen3_log_info "Ensuring that the spot instance service linked role is setup" + # Ensure the spot instance service linked role is setup + # It is required for running spot instances + gen3_create_karpenter_sqs_eventbridge + aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true + if g3k_config_lookup .global.karpenter_version; then + karpenter=$(g3k_config_lookup .global.karpenter_version) + fi + export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` + if [ "${clusterversion}" = "24+" ]; then + karpenter=${karpenter:-v0.24.0} + else + karpenter=${karpenter:-v0.22.0} + fi + echo '{ + "Statement": [ + { + "Action": [ + "ssm:GetParameter", + "iam:PassRole", + "ec2:DescribeImages", + "ec2:RunInstances", + "ec2:DescribeSubnets", + "ec2:DescribeSecurityGroups", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeInstances", + "ec2:DescribeInstanceTypes", + "ec2:DescribeInstanceTypeOfferings", + "ec2:DescribeAvailabilityZones", + "ec2:DeleteLaunchTemplate", + "ec2:CreateTags", + "ec2:CreateLaunchTemplate", + "ec2:CreateFleet", + "ec2:DescribeSpotPriceHistory", + "pricing:GetProducts" + ], + "Effect": "Allow", + "Resource": "*", + "Sid": "Karpenter" + }, + { + "Action": [ + "sqs:DeleteMessage", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ReceiveMessage" + ], + "Effect": "Allow", + "Resource": "arn:aws:sqs:*:'$(aws sts get-caller-identity --output text --query "Account")':karpenter-sqs-'$(echo vpc_name)'", + "Sid": "Karpenter2" + }, + { + "Action": "ec2:TerminateInstances", + "Condition": { + "StringLike": { + "ec2:ResourceTag/Name": "*karpenter*" + } + }, + "Effect": "Allow", + "Resource": "*", + "Sid": "ConditionalEC2Termination" + } + ], + "Version": "2012-10-17" + }' > $XDG_RUNTIME_DIR/controller-policy.json - gen3_log_info "Creating karpenter namespace" - g3kubectl create namespace karpenter 2> /dev/null || true + gen3_log_info "Creating karpenter namespace" + g3kubectl create 
namespace karpenter 2> /dev/null || true - gen3_log_info "Creating karpenter AWS role and k8s service accounts" - gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true - gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter || true - # Have to delete SA because helm chart will create the SA and there will be a conflict + gen3_log_info "Creating karpenter AWS role and k8s service accounts" + gen3 awsrole create "karpenter-controller-role-$vpc_name" karpenter "karpenter" || true + gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter || true + # Have to delete SA because helm chart will create the SA and there will be a conflict - gen3_log_info "Have to delete SA because helm chart will create the SA and there will be a conflict" - #g3kubectl delete sa karpenter -n karpenter + gen3_log_info "Have to delete SA because helm chart will create the SA and there will be a conflict" + #g3kubectl delete sa karpenter -n karpenter - gen3_log_info "aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true" - aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true - gen3_log_info "Need to tag the subnets/sg's so that karpenter can discover them automatically" - # Need to tag the subnets/sg's so that karpenter can discover them automatically - subnets=$(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_private_*' --query 'Subnets[].SubnetId' --output text) - # Will apprend secondary CIDR block subnets to be tagged as well, and if none are found then will not append anything to list - subnets+=" $(aws ec2 describe-subnets --filter 
'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_secondary_cidr_subnet_*' --query 'Subnets[].SubnetId' --output text)" - security_groups=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' --output text) || true - security_groups_jupyter=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-jupyter,ssh_eks_'$vpc_name'-nodepool-jupyter' --query 'SecurityGroups[].GroupId' --output text) || true - security_groups_workflow=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-workflow,ssh_eks_'$vpc_name'-nodepool-workflow' --query 'SecurityGroups[].GroupId' --output text) || true - cluster_endpoint="$(aws eks describe-cluster --name ${vpc_name} --query "cluster.endpoint" --output text)" + gen3_log_info "aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true" + aws iam put-role-policy --role-name "karpenter-controller-role-$vpc_name" --policy-document file://$XDG_RUNTIME_DIR/controller-policy.json --policy-name "karpenter-controller-policy" 1>&2 || true + gen3_log_info "Need to tag the subnets/sg's so that karpenter can discover them automatically" + # Need to tag the subnets/sg's so that karpenter can discover them automatically + subnets=$(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_private_*' --query 'Subnets[].SubnetId' --output text) + # Will apprend secondary CIDR block subnets to be tagged as well, and if none are found then will not append anything to list + subnets+=" $(aws ec2 describe-subnets --filter 'Name=tag:Environment,Values='$vpc_name'' 'Name=tag:Name,Values=eks_secondary_cidr_subnet_*' --query 'Subnets[].SubnetId' --output text)" + security_groups=$(aws ec2 describe-security-groups 
--filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg,ssh_eks_'$vpc_name'' --query 'SecurityGroups[].GroupId' --output text) || true + security_groups_jupyter=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-jupyter,ssh_eks_'$vpc_name'-nodepool-jupyter' --query 'SecurityGroups[].GroupId' --output text) || true + security_groups_workflow=$(aws ec2 describe-security-groups --filter 'Name=tag:Name,Values='$vpc_name'-nodes-sg-workflow,ssh_eks_'$vpc_name'-nodepool-workflow' --query 'SecurityGroups[].GroupId' --output text) || true + cluster_endpoint="$(aws eks describe-cluster --name ${vpc_name} --query "cluster.endpoint" --output text)" - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} || true - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} || true - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} || true - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-worfklow" --resources ${security_groups_workflow} || true - echo '{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Condition": { - "ArnLike": { - "aws:SourceArn": "arn:aws:eks:us-east-1:'$(aws sts get-caller-identity --output text --query "Account")':fargateprofile/'$(echo $vpc_name)'/*" - } - }, - "Principal": { - "Service": "eks-fargate-pods.amazonaws.com" - }, - "Action": "sts:AssumeRole" - } - ] - }' > $XDG_RUNTIME_DIR/fargate-policy.json - aws iam create-role --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} --assume-role-policy-document file://"$XDG_RUNTIME_DIR/fargate-policy.json" || true - aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} || true - # Wait for IAM changes to take effect - sleep 15 - aws eks create-fargate-profile 
--fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::$(aws sts get-caller-identity --output text --query "Account"):role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true - gen3_log_info "Installing karpenter using helm" - helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \ - --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ - --set settings.aws.clusterEndpoint="${cluster_endpoint}" \ - --set settings.aws.clusterName=${vpc_name} \ - --set serviceAccount.name=karpenter \ - --set serviceAccount.create=false \ - --set controller.env[0].name=AWS_REGION \ - --set controller.env[0].value=us-east-1 + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} || true + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} || true + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} || true + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-worfklow" --resources ${security_groups_workflow} || true + echo '{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Condition": { + "ArnLike": { + "aws:SourceArn": "arn:aws:eks:us-east-1:'$(aws sts get-caller-identity --output text --query "Account")':fargateprofile/'$(echo $vpc_name)'/*" + } + }, + "Principal": { + "Service": "eks-fargate-pods.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + }' > $XDG_RUNTIME_DIR/fargate-policy.json + aws iam create-role --role-name AmazonEKSFargatePodExecutionRole-${vpc_name} --assume-role-policy-document file://"$XDG_RUNTIME_DIR/fargate-policy.json" || true + aws iam attach-role-policy --policy-arn arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy --role-name 
AmazonEKSFargatePodExecutionRole-${vpc_name} || true + # Wait for IAM changes to take effect + sleep 15 + aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::$(aws sts get-caller-identity --output text --query "Account"):role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true + gen3_log_info "Installing karpenter using helm" + helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \ + --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ + --set settings.aws.clusterEndpoint="${cluster_endpoint}" \ + --set settings.aws.clusterName=${vpc_name} \ + --set serviceAccount.name=karpenter \ + --set serviceAccount.create=false \ + --set controller.env[0].name=AWS_REGION \ + --set controller.env[0].value=us-east-1 + fi + gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter + gen3_log_info "Remove cluster-autoscaler" + gen3 kube-setup-autoscaler --remove + # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues + gen3 kube-setup-fluentd --force + gen3_update_karpenter_configs fi - gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter - gen3_log_info "Remove cluster-autoscaler" - gen3 kube-setup-autoscaler --remove - # Ensure that fluentd is updated if karpenter is deployed to prevent containerd logging issues - gen3 kube-setup-fluentd --force - gen3_update_karpenter_configs } gen3_update_karpenter_configs() { From 534fbafba75e7bbe7e777e9a83cc4534bb9f572b Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Thu, 20 Apr 2023 10:21:07 -0500 Subject: [PATCH 117/362] Update values.yaml --- kube/services/datadog/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index ce662b6a1..3c5a79e4c 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -218,7 +218,7 @@ datadog: # - send_distribution_buckets: true # timeout: 5 - containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:usersync" + containerExcludeLogs: "kube_namespace:logging kube_namespace:argo" ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements From eef269508ddbc9e0948027637cddcc7eedff9907 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 25 Apr 2023 10:03:59 -0600 Subject: [PATCH 118/362] Feat/gpe 900 (#2225) * adding the "options" parameter to the squid.conf to help restrict tls versions to 1.2 and higher * removing an extra "/" so the script runs properly. * adding an annotation to the alb, so the proper security policy gets set that will require tls 1.2 and above. * removing the "options" parameter as it is no longer needed. --- gen3/bin/kube-setup-ingress.sh | 2 +- kube/services/ingress/ingress.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index 8def0451d..d0bcff9a4 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -298,7 +298,7 @@ EOM } gen3_ingress_deploy_helm_chart() { - kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller//crds?ref=master" + kubectl apply -k "github.com/aws/eks-charts/stable/aws-load-balancer-controller/crds?ref=master" if (! 
helm status aws-load-balancer-controller -n kube-system > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then helm repo add eks https://aws.github.io/eks-charts 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml index 9352005d7..3ceacf608 100644 --- a/kube/services/ingress/ingress.yaml +++ b/kube/services/ingress/ingress.yaml @@ -11,6 +11,7 @@ metadata: alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600 alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' + alb.ingress.kubernetes.io/ssl-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" spec: ingressClassName: alb rules: From 046942e850de98623c8ea01e717696b52a3b826a Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Tue, 25 Apr 2023 13:22:02 -0700 Subject: [PATCH 119/362] Set resources for spark deployment (#2226) --- kube/services/spark/spark-deploy.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kube/services/spark/spark-deploy.yaml b/kube/services/spark/spark-deploy.yaml index 00487eb0f..b280cecf0 100644 --- a/kube/services/spark/spark-deploy.yaml +++ b/kube/services/spark/spark-deploy.yaml @@ -80,9 +80,9 @@ spec: volumeMounts: imagePullPolicy: Always resources: - limits: - cpu: 0.5 - memory: 2Gi + requests: + cpu: 3 + memory: 4Gi command: ["/bin/bash" ] args: - "-c" From 8b5c887f4264b32775c73e818e4d2917b6cfc27f Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Tue, 25 Apr 2023 16:03:15 -0500 Subject: [PATCH 120/362] adding biosystics to whitelist (#2227) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 3a0b82a77..f37180df8 100644 --- 
a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -112,6 +112,7 @@ mirrors.gigenet.com mirrors.lga7.us.voxel.net mirrors.nics.utk.edu mirrors.syringanetworks.net +mps.csb.pitt.edu mran.microsoft.com neuro.debian.net neurodeb.pirsquared.org From e6f40b978b07102aa5287bdf58828535129403eb Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 27 Apr 2023 09:59:05 -0500 Subject: [PATCH 121/362] feat: update Apache Superset to version based on Helm chart 0.9.2 and Apache Superset 2.1.0 (#2224) --- kube/services/superset/superset-deploy.yaml | 112 +++++++++++------- kube/services/superset/superset-redis.yaml | 62 +++++----- .../superset/superset-secrets-template.yaml | 2 +- 3 files changed, 102 insertions(+), 74 deletions(-) diff --git a/kube/services/superset/superset-deploy.yaml b/kube/services/superset/superset-deploy.yaml index 1312e7ea2..6a578eb33 100644 --- a/kube/services/superset/superset-deploy.yaml +++ b/kube/services/superset/superset-deploy.yaml @@ -22,19 +22,19 @@ metadata: name: superset-config labels: app: superset - chart: superset-0.7.1 + chart: superset-0.9.2 release: "superset" heritage: "Helm" type: Opaque stringData: superset_config.py: | - + import os from cachelib.redis import RedisCache - + def env(key, default=None): return os.getenv(key, default) - + MAPBOX_API_KEY = env('MAPBOX_API_KEY', '') CACHE_CONFIG = { 'CACHE_TYPE': 'redis', @@ -46,11 +46,11 @@ stringData: 'CACHE_REDIS_DB': env('REDIS_DB', 1), } DATA_CACHE_CONFIG = CACHE_CONFIG - + SQLALCHEMY_DATABASE_URI = f"postgresql+psycopg2://{env('DB_USER')}:{env('DB_PASS')}@{env('DB_HOST')}:{env('DB_PORT')}/{env('DB_NAME')}" SQLALCHEMY_TRACK_MODIFICATIONS = True - SECRET_KEY = env('SECRET_KEY', '') - + SECRET_KEY = env('SECRET_KEY', 'thisISaSECRET_1234') + # Flask-WTF flag for CSRF WTF_CSRF_ENABLED = True # Add endpoints that need to be exempt from CSRF protection @@ -62,15 +62,15 @@ stringData: CELERY_ANNOTATIONS = {'tasks.add': {'rate_limit': '10/s'}} 
BROKER_URL = f"redis://{env('REDIS_HOST')}:{env('REDIS_PORT')}/0" CELERY_RESULT_BACKEND = f"redis://{env('REDIS_HOST')}:{env('REDIS_PORT')}/0" - + CELERY_CONFIG = CeleryConfig RESULTS_BACKEND = RedisCache( host=env('REDIS_HOST'), port=env('REDIS_PORT'), key_prefix='superset_results' ) - - + + # Overrides # enable_proxy_fix # This will make sure the redirect_uri is properly computed, even with SSL offloading @@ -131,8 +131,16 @@ stringData: superset db upgrade echo "Initializing roles..." superset init - - + + # echo "Creating admin user..." + # superset fab create-admin \ + # --username admin \ + # --firstname Superset \ + # --lastname Admin \ + # --email admin@superset.com \ + # --password admin \ + # || true + if [ -f "/app/configs/import_datasources.yaml" ]; then echo "Importing database connections.... " superset import_datasources -p /app/configs/import_datasources.yaml @@ -171,10 +179,11 @@ metadata: name: superset labels: app: superset - chart: superset-0.7.1 + chart: superset-0.9.2 release: superset heritage: Helm spec: +# type: ClusterIP type: NodePort ports: - port: 8088 @@ -208,7 +217,7 @@ metadata: name: superset-worker labels: app: superset-worker - chart: superset-0.7.1 + chart: superset-0.9.2 release: superset heritage: Helm spec: @@ -220,14 +229,14 @@ spec: template: metadata: annotations: - checksum/superset_config.py: 955c3e88940f522fe4d9ad60d105ab4537e290697d135703c8a01aeb6c1a3d8d - checksum/connections: c44da43c5f3426c3c4a25f3235e3e23452ce1cf713ad059eaef7767e175a5eb4 + checksum/superset_config.py: 441901105d53c640c7612da3d7b751dc6f770c1796e733ee79c9322d27cd1b5d + checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82 checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a - checksum/configOverrides: 
4b1ff4f862a95242ea509b5dc5f7d87c47faf1815de5ea21a46b3fde8e576bf4 + checksum/configOverrides: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/configOverridesFiles: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a - + labels: app: superset-worker release: superset @@ -258,17 +267,18 @@ spec: - command: - /bin/sh - -c - - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -wait "tcp://$REDIS_HOST:$REDIS_PORT" + -timeout 120s envFrom: - secretRef: name: 'superset-env' - image: 'busybox:1.35' + image: 'jwilder/dockerize:latest' imagePullPolicy: 'IfNotPresent' - name: wait-for-postgres + name: wait-for-postgres-redis containers: - name: superset - image: "apache/superset:2.0.0" - imagePullPolicy: Always + image: "apache/superset:2.1.0" + imagePullPolicy: IfNotPresent command: ["/bin/sh","-c",". /app/pythonpath/superset_bootstrap.sh; celery --app=superset.tasks.celery_app:app worker"] env: - name: "SUPERSET_PORT" @@ -280,6 +290,17 @@ spec: - name: superset-config mountPath: "/app/pythonpath" readOnly: true + livenessProbe: + exec: + command: + - sh + - -c + - celery -A superset.tasks.celery_app:app inspect ping -d celery@$HOSTNAME + failureThreshold: 3 + initialDelaySeconds: 120 + periodSeconds: 60 + successThreshold: 1 + timeoutSeconds: 60 resources: limits: cpu: 1 @@ -312,7 +333,7 @@ metadata: name: superset labels: app: superset - chart: superset-0.7.1 + chart: superset-0.9.2 release: superset heritage: Helm spec: @@ -325,14 +346,14 @@ spec: metadata: annotations: # Force reload on config changes - checksum/superset_config.py: 955c3e88940f522fe4d9ad60d105ab4537e290697d135703c8a01aeb6c1a3d8d - checksum/superset_init.sh: ff251d03d362c4a3ff1451d24893d5d12811f67edc84efa39484a84c59c3f883 - checksum/superset_bootstrap.sh: a6edf034118d68cef7203cc3181bb6c72b6244cdedf270ee4accc9ae9ff92b2e - checksum/connections: 
c44da43c5f3426c3c4a25f3235e3e23452ce1cf713ad059eaef7767e175a5eb4 + checksum/superset_config.py: 441901105d53c640c7612da3d7b751dc6f770c1796e733ee79c9322d27cd1b5d + checksum/superset_init.sh: e6b1e8eac1f7a79a07a6c72a0e2ee6e09654eeb439c6bbe61bfd676917c41e02 + checksum/superset_bootstrap.sh: dc9a47141051ced34960c313860a55e03eb48c1fa36a0ed25c03ad60cd3b5c48 + checksum/connections: a91716d6d1088e870fbe02159dc0b066dd011885aa08a22fbe60ea1cd4720f82 checksum/extraConfigs: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecrets: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/extraSecretEnv: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a - checksum/configOverrides: 4b1ff4f862a95242ea509b5dc5f7d87c47faf1815de5ea21a46b3fde8e576bf4 + checksum/configOverrides: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a checksum/configOverridesFiles: 44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a labels: app: superset @@ -347,17 +368,17 @@ spec: - command: - /bin/sh - -c - - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s envFrom: - secretRef: name: 'superset-env' - image: 'busybox:1.35' + image: 'jwilder/dockerize:latest' imagePullPolicy: 'IfNotPresent' name: wait-for-postgres containers: - name: superset - image: "apache/superset:2.0.0" - imagePullPolicy: Always + image: "apache/superset:2.1.0" + imagePullPolicy: IfNotPresent command: ["/bin/sh","-c",". 
/app/pythonpath/superset_bootstrap.sh; /usr/bin/run-server.sh"] env: - name: "SUPERSET_PORT" @@ -373,24 +394,33 @@ spec: - name: http containerPort: 8088 protocol: TCP - livenessProbe: + startupProbe: + failureThreshold: 60 httpGet: path: /health port: http initialDelaySeconds: 15 - timeoutSeconds: 1 - failureThreshold: 3 - periodSeconds: 15 + periodSeconds: 5 successThreshold: 1 + timeoutSeconds: 1 readinessProbe: + failureThreshold: 3 httpGet: path: /health port: http initialDelaySeconds: 15 + periodSeconds: 15 + successThreshold: 1 timeoutSeconds: 1 + livenessProbe: failureThreshold: 3 + httpGet: + path: /health + port: http + initialDelaySeconds: 15 periodSeconds: 15 successThreshold: 1 + timeoutSeconds: 1 resources: limits: cpu: '0.25' @@ -438,20 +468,20 @@ spec: - command: - /bin/sh - -c - - until nc -zv $DB_HOST $DB_PORT -w1; do echo 'waiting for db'; sleep 1; done + - dockerize -wait "tcp://$DB_HOST:$DB_PORT" -timeout 120s envFrom: - secretRef: name: 'superset-env' - image: 'busybox:1.35' + image: 'jwilder/dockerize:latest' imagePullPolicy: 'IfNotPresent' name: wait-for-postgres containers: - name: superset-init-db - image: "apache/superset:2.0.0" + image: "apache/superset:2.1.0" envFrom: - secretRef: name: superset-env - imagePullPolicy: Always + imagePullPolicy: IfNotPresent volumeMounts: - name: superset-config mountPath: "/app/pythonpath" diff --git a/kube/services/superset/superset-redis.yaml b/kube/services/superset/superset-redis.yaml index 875e3030b..bd1e6b064 100644 --- a/kube/services/superset/superset-redis.yaml +++ b/kube/services/superset/superset-redis.yaml @@ -4,10 +4,10 @@ apiVersion: v1 kind: ServiceAccount automountServiceAccountToken: true metadata: - name: superset-redis + name: redis labels: app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm --- @@ -18,7 +18,7 @@ metadata: name: superset-redis-configuration labels: 
app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm data: @@ -37,7 +37,6 @@ data: # End of master configuration replica.conf: |- dir /data - slave-read-only yes # User-supplied replica configuration: rename-command FLUSHDB "" rename-command FLUSHALL "" @@ -50,7 +49,7 @@ metadata: name: superset-redis-health labels: app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm data: @@ -60,13 +59,13 @@ data: [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" response=$( - timeout -s 3 $1 \ + timeout -s 15 $1 \ redis-cli \ -h localhost \ -p $REDIS_PORT \ ping ) - if [ $? == 124 ]; then + if [ "$?" -eq "124" ]; then echo "Timed out" exit 1 fi @@ -80,13 +79,13 @@ data: [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" [[ -n "$REDIS_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_PASSWORD" response=$( - timeout -s 3 $1 \ + timeout -s 15 $1 \ redis-cli \ -h localhost \ -p $REDIS_PORT \ ping ) - if [ $? == 124 ]; then + if [ "$?" -eq "124" ]; then echo "Timed out" exit 1 fi @@ -101,13 +100,13 @@ data: [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" response=$( - timeout -s 3 $1 \ + timeout -s 15 $1 \ redis-cli \ -h $REDIS_MASTER_HOST \ -p $REDIS_MASTER_PORT_NUMBER \ ping ) - if [ $? == 124 ]; then + if [ "$?" 
-eq "124" ]; then echo "Timed out" exit 1 fi @@ -121,13 +120,13 @@ data: [[ -f $REDIS_MASTER_PASSWORD_FILE ]] && export REDIS_MASTER_PASSWORD="$(< "${REDIS_MASTER_PASSWORD_FILE}")" [[ -n "$REDIS_MASTER_PASSWORD" ]] && export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" response=$( - timeout -s 3 $1 \ + timeout -s 15 $1 \ redis-cli \ -h $REDIS_MASTER_HOST \ -p $REDIS_MASTER_PORT_NUMBER \ ping ) - if [ $? == 124 ]; then + if [ "$?" -eq "124" ]; then echo "Timed out" exit 1 fi @@ -156,7 +155,7 @@ metadata: name: superset-redis-scripts labels: app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm data: @@ -164,10 +163,10 @@ data: #!/bin/bash [[ -f $REDIS_PASSWORD_FILE ]] && export REDIS_PASSWORD="$(< "${REDIS_PASSWORD_FILE}")" - if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + if [[ -f /opt/bitnami/redis/mounted-etc/master.conf ]];then cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf fi - if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + if [[ -f /opt/bitnami/redis/mounted-etc/redis.conf ]];then cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf fi ARGS=("--port" "${REDIS_PORT}") @@ -183,7 +182,7 @@ metadata: name: superset-redis-headless labels: app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm annotations: @@ -208,13 +207,14 @@ metadata: name: superset-redis-master labels: app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master spec: type: ClusterIP - + internalTrafficPolicy: Cluster + sessionAffinity: None ports: - name: tcp-redis port: 6379 @@ -225,7 +225,7 @@ spec: app.kubernetes.io/instance: superset app.kubernetes.io/component: master --- -# Source: superset/charts/redis/templates/master/statefulset.yaml +# Source: superset/charts/redis/templates/master/application.yaml apiVersion: apps/v1 kind: StatefulSet metadata: @@ -234,7 +234,7 @@ metadata: name: superset-redis-master labels: app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master @@ -247,7 +247,6 @@ spec: app.kubernetes.io/component: master serviceName: superset-redis-headless updateStrategy: - rollingUpdate: {} type: RollingUpdate template: metadata: @@ -255,23 +254,23 @@ spec: app: superset-redis-master dbomop-data: "yes" app.kubernetes.io/name: redis - helm.sh/chart: redis-16.3.1 + helm.sh/chart: redis-17.9.4 app.kubernetes.io/instance: superset app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: master annotations: - checksum/configmap: 5fb78a3f5ce9ca1af5b7223f9cebe42f832ebc64f37e09a2fc8c8b29bb7101b0 - checksum/health: 
2ea27c28e44af78b1d3dc1373aa2ac24ba2b215f788de4a0f0c9e02cbb79c533 - checksum/scripts: c351ebe638f6967b5bc76c2f38c28e2f7f65bc93846a1cd7786e2cbff9d51620 - checksum/secret: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + checksum/configmap: 3e37ce809cf97cf1904e4f06e78108dba34472423e16c5e416026bcb192895fb + checksum/health: ad98a9690e1f9c5784f1914c5b8e04b1ae2c1ddb7071d05acd6e7c7f0afa6e8f + checksum/scripts: 9b0e8fa5fffccc8a213cd402a29a8124753f879a5370299258cb762861c6fb8a + checksum/secret: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b spec: - + securityContext: fsGroup: 1001 serviceAccountName: superset-redis affinity: podAffinity: - + podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: @@ -283,11 +282,11 @@ spec: topologyKey: kubernetes.io/hostname weight: 1 nodeAffinity: - + terminationGracePeriodSeconds: 30 containers: - name: redis - image: docker.io/bitnami/redis:6.2.6-debian-10-r120 + image: docker.io/bitnami/redis:7.0.10-debian-11-r4 imagePullPolicy: "IfNotPresent" securityContext: runAsUser: 1001 @@ -343,7 +342,6 @@ spec: mountPath: /health - name: redis-data mountPath: /data - subPath: - name: config mountPath: /opt/bitnami/redis/mounted-etc - name: redis-tmp-conf diff --git a/kube/services/superset/superset-secrets-template.yaml b/kube/services/superset/superset-secrets-template.yaml index 8a3c7a2a6..774a63142 100644 --- a/kube/services/superset/superset-secrets-template.yaml +++ b/kube/services/superset/superset-secrets-template.yaml @@ -22,7 +22,7 @@ metadata: name: superset-env labels: app: superset - chart: superset-0.6.1 + chart: superset-0.9.2 release: "superset" heritage: "Helm" type: Opaque From d6d7648b0ebdfbe69f691faad06a57f7ef47602f Mon Sep 17 00:00:00 2001 From: Shawn O'Connor Date: Mon, 1 May 2023 09:07:42 -0500 Subject: [PATCH 122/362] feat/portalHeader: PPS-212: add header to portal to prevent clickjacking (#2229) --- .../gen3.nginx.conf/portal-as-root/portal-service.conf | 3 
+++ 1 file changed, 3 insertions(+) diff --git a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf index e195d2fb2..9d38a2a99 100644 --- a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/portal-service.conf @@ -21,5 +21,8 @@ rewrite ^/(.*)$ /dashboard/Public/maintenance-page/index.html redirect; } + # added to avoid click-jacking attacks + add_header X-Frame-Options "SAMEORIGIN"; + proxy_pass $upstream; } From a9c6e9c61dc23792687a596ea620b839c7e40812 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 2 May 2023 14:07:12 -0600 Subject: [PATCH 123/362] enabling the system probe container for testing (#2233) --- kube/services/datadog/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 3c5a79e4c..9c01de035 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -13,7 +13,7 @@ datadog: #Enables Optional Universal Service Monitoring ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm serviceMonitoring: - enabled: false + enabled: true # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. ## If set, this parameter takes precedence over "apiKey". 
@@ -168,7 +168,7 @@ datadog: networkMonitoring: # datadog.networkMonitoring.enabled -- Enable network performance monitoring - enabled: false + enabled: true ## Enable security agent and provide custom configs From 54a888be56e74f32baad853cdcd68cb2e186223c Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 2 May 2023 15:53:37 -0600 Subject: [PATCH 124/362] Feat/gpe 935 (#2235) * enabling the system probe container for testing * reverting my changes * reverting my changes --- kube/services/datadog/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 9c01de035..3c5a79e4c 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -13,7 +13,7 @@ datadog: #Enables Optional Universal Service Monitoring ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm serviceMonitoring: - enabled: true + enabled: false # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. ## If set, this parameter takes precedence over "apiKey". 
@@ -168,7 +168,7 @@ datadog: networkMonitoring: # datadog.networkMonitoring.enabled -- Enable network performance monitoring - enabled: true + enabled: false ## Enable security agent and provide custom configs From 105dd9e1d0626e80f5bc7142be9735908544b27f Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 3 May 2023 13:40:14 -0500 Subject: [PATCH 125/362] Feat/kubecost spot instance feed (#2231) * GPE-823 * GPE-823 * GPE-823 * GPE-823 * add kubecost * GPE-823 --------- Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-kubecost.sh | 274 ++++-------------- gen3/bin/kube-setup-revproxy.sh | 8 + .../kubecost-alb.yaml | 0 .../object-store.yaml | 0 kube/services/kubecost/values.yaml | 183 ++++++++++++ tf_files/aws/kubecost/root.tf | 93 ++---- tf_files/aws/kubecost/sample.tfvars | 8 +- tf_files/aws/kubecost/variables.tf | 10 +- 8 files changed, 262 insertions(+), 314 deletions(-) rename kube/services/{kubecost-standalone => kubecost}/kubecost-alb.yaml (100%) rename kube/services/{kubecost-standalone => kubecost}/object-store.yaml (100%) create mode 100644 kube/services/kubecost/values.yaml diff --git a/gen3/bin/kube-setup-kubecost.sh b/gen3/bin/kube-setup-kubecost.sh index 07487672d..2166f051c 100644 --- a/gen3/bin/kube-setup-kubecost.sh +++ b/gen3/bin/kube-setup-kubecost.sh @@ -12,10 +12,11 @@ gen3_setup_kubecost_infrastructure() { gen3 workon default "${vpc_name}__kubecost" gen3 cd echo "vpc_name=\"$vpc_name\"" > config.tfvars - if [[ $deployment == "slave" ]]; then - echo "cur_s3_bucket=\"$s3Bucket\"" >> config.tfvars - elif [[ $deployment == "master" ]]; then - echo "slave_account_id=\"$slaveAccountId\"" >> config.tfvars + if [[ ! -z "$curBucketCreated" ]]; then + echo "cur_s3_bucket=\"$curBucket\"" >> config.tfvars + fi + if [[ ! 
-z "$reportBucketCreated" ]]; then + echo "reports_s3_bucket=\"$reportBucket\"" >> config.tfvars fi gen3 tfplan 2>&1 gen3 tfapply 2>&1 @@ -38,22 +39,19 @@ gen3_setup_kubecost_service_account() { aws iam attach-role-policy --role-name "$roleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2 #gen3 awsrole sa-annotate "$saName" "$roleName" "kubecost" kubectl delete sa -n kubecost $saName + # SA for reports reportsRoleName="$vpc_name-opencost-report-role" reportsSaName="reports-service-account" gen3 awsrole create "$reportsRoleName" "$reportsSaName" "kubecost" || return 1 - aws iam attach-role-policy --role-name "$reportsRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 + aws iam attach-role-policy --role-name "$reportsRoleName" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-report-policy" 1>&2 gen3 awsrole sa-annotate "$reportsSaName" "$reportsRoleName" "kubecost" } gen3_delete_kubecost_service_account() { aws iam detach-role-policy --role-name "${vpc_name}-kubecost-user" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-CUR-policy" 1>&2 - aws iam detach-role-policy --role-name "${vpc_name}-thanos-user" --policy-arn "arn:aws:iam::$accountID:policy/$vpc_name-Kubecost-Thanos-policy" 1>&2 gen3 workon default "${vpc_name}-kubecost-user_role" gen3 tfplan --destroy 2>&1 gen3 tfapply 2>&1 - gen3 workon default "${vpc_name}-thanos-user_role" - gen3 tfplan --destroy 2>&1 - gen3 tfapply 2>&1 } gen3_delete_kubecost() { @@ -63,68 +61,40 @@ gen3_delete_kubecost() { } gen3_kubecost_create_alb() { - kubectl apply -f "${GEN3_HOME}/kube/services/kubecost-${deployment}/kubecost-alb.yaml" -n kubecost + kubectl apply -f "${GEN3_HOME}/kube/services/kubecost/kubecost-alb.yaml" -n kubecost } gen3_setup_kubecost() { kubectl create namespace kubecost || true + # If s3 bucket not supplied, create a new one + if [[ -z $curBucket ]]; then + curBucket="$vpc_name-kubecost-bucket" + fi + # 
If report bucket not supplied, use the same as cur bucket + if [[ -z $reportBucket ]]; then + reportBucket=$curBucket + fi gen3_setup_kubecost_infrastructure + aws ec2 create-spot-datafeed-subscription --bucket $curBucket --prefix spot-feed || true # Change the SA permissions based on slave/master/standalone if [[ -z $(kubectl get sa -n kubecost | grep $vpc_name-kubecost-user) ]]; then gen3_setup_kubecost_service_account fi - # If master setup and s3 bucket not supplied, set terraform master s3 bucket name for thanos secret - if [[ -z $s3Bucket ]]; then - s3Bucket="$vpc_name-kubecost-bucket" - fi if (! helm status kubecost -n kubecost > /dev/null 2>&1 ) || [[ ! -z "$FORCE" ]]; then - if [[ $deployment == "slave" ]]; then - valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" - valuesTemplate="${GEN3_HOME}/kube/services/kubecost-slave/values.yaml" - thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" - thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-slave/object-store.yaml" - thanosValues="${GEN3_HOME}/kube/services/kubecost-slave/values-thanos.yaml" - g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile - elif [[ $deployment == "master" ]]; then - valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" - valuesTemplate="${GEN3_HOME}/kube/services/kubecost-master/values.yaml" - thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" - thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-master/object-store.yaml" - g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" 
ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile - gen3_kubecost_create_alb - else - valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" - valuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/values.yaml" - #thanosValuesFile="$XDG_RUNTIME_DIR/object-store.yaml" - #thanosValuesTemplate="${GEN3_HOME}/kube/services/kubecost-standalone/object-store.yaml" - g3k_kv_filter $valuesTemplate KUBECOST_TOKEN "${kubecostToken}" KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" THANOS_SA "$thanosSaName" ATHENA_BUCKET "s3://$s3Bucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION "$awsRegion" > $valuesFile - gen3_kubecost_create_alb - fi - #kubectl delete secret -n kubecost kubecost-thanos || true - #kubectl delete secret -n kubecost thanos || true - #g3k_kv_filter $thanosValuesTemplate AWS_REGION $awsRegion KUBECOST_S3_BUCKET $s3Bucket > $thanosValuesFile - #kubectl create secret generic kubecost-thanos -n kubecost --from-file=$thanosValuesFile - #kubectl create secret generic thanos -n kubecost --from-file=$thanosValuesFile - # Need to setup thanos config - gen3 kube-setup-certs - gen3 kube-setup-prometheus - g3kubectl delete secret -n kubecost cert-kubecost-cost-analyzer || true - g3kubectl create secret generic "cert-kubecost-cost-analyzer" "--from-file=tls.crt=$(gen3_secrets_folder)/credentials/kubecost-cost-analyzer-service.crt" "--from-file=tls.key=$(gen3_secrets_folder)/credentials/kubecost-cost-analyzer-service.key" -n kubecost || true + valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" + valuesTemplate="${GEN3_HOME}/kube/services/kubecost/values.yaml" + g3k_kv_filter $valuesTemplate KUBECOST_SA "eks.amazonaws.com/role-arn: arn:aws:iam::$accountID:role/gen3_service/$roleName" ATHENA_BUCKET "$curBucket" ATHENA_DATABASE "athenacurcfn_$vpc_name" ATHENA_TABLE "${vpc_name}_cur" AWS_ACCOUNT_ID "$accountID" AWS_REGION 
"$awsRegion" > $valuesFile helm repo add kubecost https://kubecost.github.io/cost-analyzer/ --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - if [[ -z $disablePrometheus ]]; then - helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} - else - helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} - fi + helm upgrade --install kubecost kubecost/cost-analyzer -n kubecost -f ${valuesFile} else gen3_log_info "kube-setup-kubecost exiting - kubecost already deployed, use --force true to redeploy" fi - gen3_setup_reports_cronjob + gen3_kubecost_create_alb } -gen3_setup_reports_cronjob { - gen3 job cron opencost-report '0 0 * * 0' BUCKET_NAME $s3Bucket +gen3_setup_reports_cronjob() { + gen3 job cron opencost-report-argo '0 0 * * 0' BUCKET_NAME $reportBucket } if [[ -z "$GEN3_SOURCE_ONLY" ]]; then @@ -135,170 +105,29 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then command="$1" shift case "$command" in - "master") - deployment="master" - subcommand="" - if [[ $# -gt 0 ]]; then - subcommand="$1" - shift - fi - case "$subcommand" in - "create") - for flag in $@; do - if [[ $# -gt 0 ]]; then - flag="$1" - shift - fi - case "$flag" in - "--slave-account-id") - slaveAccountId="$1" - ;; - "--kubecost-token") - kubecostToken="$1" - ;; - "--force") - if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then - FORCE=true - fi - ;; - "--disable-prometheus") - if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then - disablePrometheus=true - fi - ;; - "--prometheus-namespace") - prometheusNamespace="$1" - ;; - "--prometheus-service") - prometheusService="$1" - ;; - esac - done - if [[ -z $slaveAccountId || -z $kubecostToken ]]; then - gen3_log_err "Please ensure you set the required flags." 
- exit 1 - fi - if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then - gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name." - exit 1 - fi - gen3_setup_kubecost "$@" - ;; - "alb") - gen3_kubecost_create_alb - ;; - *) - gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost" - ;; - esac - ;; - "slave") - deployment="slave" - subcommand="" - if [[ $# -gt 0 ]]; then - subcommand="$1" - shift - fi - case "$subcommand" in - "create") - for flag in $@; do - if [[ $# -gt 0 ]]; then - flag="$1" - shift - fi - case "$flag" in - "--s3-bucket") - s3Bucket="$1" - ;; - "--kubecost-token") - kubecostToken="$1" - ;; - "--force") - if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then - FORCE=true - fi - ;; - "--disable-prometheus") - if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then - disablePrometheus=true - fi - ;; - "--prometheus-namespace") - prometheusNamespace="$1" - ;; - "--prometheus-service") - prometheusService="$1" - ;; - esac - done - if [[ -z $s3Bucket || -z $kubecostToken ]]; then - gen3_log_err "Please ensure you set the required flags." - exit 1 - fi - if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then - gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name." 
- exit 1 - fi - gen3_setup_kubecost "$@" - ;; - *) - gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost" - ;; - esac - ;; - "standalone") - deployment="standalone" - subcommand="" - if [[ $# -gt 0 ]]; then - subcommand="$1" - shift - fi - case "$subcommand" in - "create") - for flag in $@; do - if [[ $# -gt 0 ]]; then - flag="$1" - shift + "create") + for flag in $@; do + if [[ $# -gt 0 ]]; then + flag="$1" + shift + fi + case "$flag" in + "--force") + if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then + FORCE=true fi - case "$flag" in - "--kubecost-token") - kubecostToken="$1" - ;; - "--force") - if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then - FORCE=true - fi - ;; - "--disable-prometheus") - if [[ $(echo $1 | tr '[:upper:]' '[:lower:]') == "true" ]]; then - disablePrometheus=true - fi - ;; - "--prometheus-namespace") - prometheusNamespace="$1" - ;; - "--prometheus-service") - prometheusService="$1" - ;; - esac - done - if [[ -z $kubecostToken ]]; then - gen3_log_err "Please ensure you set the required flags." - exit 1 - fi - if [[ $disablePrometheus == true && -z $prometheusNamespace && -z $prometheusService ]]; then - gen3_log_err "If you disable prometheus, set the flags for the local prometheus namespace and service name." 
- exit 1 - fi - gen3_setup_kubecost "$@" - ;; - "alb") - gen3_kubecost_create_alb - ;; - *) - gen3_log_err "gen3_logs" "invalid history subcommand $subcommand - try: gen3 help kube-setup-kubecost" - ;; - esac + ;; + "--cur-bucket") + curBucket="$1" + curBucketCreated=true + ;; + "--report-bucket") + reportBucket="$1" + reportBucketCreated=true + ;; + esac + done + gen3_setup_kubecost "$@" ;; "cronjob") subcommand="" @@ -314,13 +143,13 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then shift fi case "$flag" in - "--s3-bucket") - s3Bucket="$1" + "--report-bucket") + reportBucket="$1" ;; esac done - if [[ -z $s3Bucket ]]; then - gen3_log_err "Please ensure you set the s3Bucket for setting up cronjob without full opencost deployment." + if [[ -z $reportBucket ]]; then + gen3_log_err "Please ensure you set the reportBucket for setting up cronjob without full opencost deployment." exit 1 fi gen3_setup_reports_cronjob @@ -334,8 +163,7 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3_delete_kubecost ;; *) - gen3_log_err "gen3_logs" "invalid command $command" - gen3_kubecost_help + gen3_setup_kubecost "$@" ;; esac fi diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 9d60c62cb..fcc2ef3b7 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -138,6 +138,14 @@ then fi fi +if g3kubectl get namespace kubecost > /dev/null 2>&1; +then + filePath="$scriptDir/gen3.nginx.conf/kubecost-service.conf" + if [[ -f "$filePath" ]]; then + confFileList+=("--from-file" "$filePath") + fi +fi + # #echo "${confFileList[@]}" $BASHPID # if [[ $current_namespace == "default" ]]; then # if g3kubectl get namespace grafana > /dev/null 2>&1; then diff --git a/kube/services/kubecost-standalone/kubecost-alb.yaml b/kube/services/kubecost/kubecost-alb.yaml similarity index 100% rename from kube/services/kubecost-standalone/kubecost-alb.yaml rename to kube/services/kubecost/kubecost-alb.yaml diff --git 
a/kube/services/kubecost-standalone/object-store.yaml b/kube/services/kubecost/object-store.yaml similarity index 100% rename from kube/services/kubecost-standalone/object-store.yaml rename to kube/services/kubecost/object-store.yaml diff --git a/kube/services/kubecost/values.yaml b/kube/services/kubecost/values.yaml new file mode 100644 index 000000000..d815f6aca --- /dev/null +++ b/kube/services/kubecost/values.yaml @@ -0,0 +1,183 @@ +#kubecostToken: KUBECOST_TOKEN + +global: + grafana: + enabled: false + proxy: false +pricingCsv: + enabled: false + location: + provider: "AWS" + region: "us-east-1" + URI: s3://kc-csv-test/pricing_schema.csv # a valid file URI + csvAccessCredentials: pricing-schema-access-secret + +tolerations: +- key: "role" + operator: "Equal" + value: "prometheus" + effect: "NoSchedule" + +nodeSelector: {} + +affinity: {} + +# If true, creates a PriorityClass to be used by the cost-analyzer pod +priority: + enabled: false + +# If true, enable creation of NetworkPolicy resources. +networkPolicy: + enabled: false + +podSecurityPolicy: + enabled: false + +# Enable this flag if you need to install with specfic image tags +# imageVersion: prod-1.97.0 + +kubecostFrontend: + image: public.ecr.aws/kubecost/frontend + imagePullPolicy: Always + resources: + requests: + cpu: "10m" + memory: "55Mi" + #limits: + # cpu: "100m" + # memory: "256Mi" + +kubecostModel: + image: public.ecr.aws/kubecost/cost-model + imagePullPolicy: Always + warmCache: true + warmSavingsCache: true + etl: true + # The total number of days the ETL storage will build + etlStoreDurationDays: 120 + maxQueryConcurrency: 5 + # utcOffset represents a timezone in hours and minutes east (+) or west (-) + # of UTC, itself, which is defined as +00:00. 
+ # See the tz database of timezones to look up your local UTC offset: + # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones + utcOffset: "+00:00" + resources: + requests: + cpu: "500m" + memory: "256Mi" + #limits: + # cpu: "800m" + # memory: "256Mi" + +# Define persistence volume for cost-analyzer +persistentVolume: + size: 32Gi + dbSize: 32.0Gi + enabled: true # Note that setting this to false means configurations will be wiped out on pod restart. + # storageClass: "-" # + # existingClaim: kubecost-cost-analyzer # a claim in the same namespace as kubecost + +service: + type: ClusterIP + port: 9090 + targetPort: 9090 + # nodePort: + labels: {} + annotations: {} + +prometheus: + server: + # If clusterIDConfigmap is defined, instead use user-generated configmap with key CLUSTER_ID + # to use as unique cluster ID in kubecost cost-analyzer deployment. + # This overrides the cluster_id set in prometheus.server.global.external_labels. + # NOTE: This does not affect the external_labels set in prometheus config. 
+ # clusterIDConfigmap: cluster-id-configmap + image: + repository: public.ecr.aws/kubecost/prometheus + tag: v2.35.0 + resources: + requests: + memory: 30Gi + # requests: + # cpu: 500m + # memory: 30Gi + global: + scrape_interval: 1m + scrape_timeout: 10s + evaluation_interval: 1m + external_labels: + cluster_id: kubecost + persistentVolume: + size: 32Gi + enabled: true + extraArgs: + query.max-concurrency: 1 + query.max-samples: 100000000 + tolerations: + - key: "role" + operator: "Equal" + value: "prometheus" + effect: "NoSchedule" + + configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: false + + ## configmap-reload container name + ## + name: configmap-reload + ## configmap-reload container image + ## + image: + repository: public.ecr.aws/bitnami/configmap-reload + tag: 0.7.1 + pullPolicy: IfNotPresent + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + + kube-state-metrics: + disabled: false + nodeExporter: + enabled: false + +reporting: + productAnalytics: false + +serviceAccount: + create: true # Set this to false if you're bringing your own service account. 
+ annotations: + KUBECOST_SA + +kubecostProductConfigs: + athenaBucketName: s3://ATHENA_BUCKET + athenaRegion: AWS_REGION + athenaDatabase: ATHENA_DATABASE + athenaTable: ATHENA_TABLE + athenaProjectID: AWS_ACCOUNT_ID + clusterName: kubecost + #serviceKeySecretName: aws-service-key , might work with SA attached instead + projectID: AWS_ACCOUNT_ID + awsSpotDataRegion: AWS_REGION + awsSpotDataBucket: ATHENA_BUCKET + awsSpotDataPrefix: "spot-feed" + +networkCosts: + enabled: true diff --git a/tf_files/aws/kubecost/root.tf b/tf_files/aws/kubecost/root.tf index 261f4419b..aa03555e3 100644 --- a/tf_files/aws/kubecost/root.tf +++ b/tf_files/aws/kubecost/root.tf @@ -10,9 +10,10 @@ terraform { } locals { - account_id = data.aws_caller_identity.current.account_id - region = data.aws_region.current.name - cur_bucket = var.cur_s3_bucket != "" ? var.cur_s3_bucket : aws_s3_bucket.cur-bucket.0.id + account_id = data.aws_caller_identity.current.account_id + region = data.aws_region.current.name + cur_bucket = var.cur_s3_bucket != "" ? var.cur_s3_bucket : aws_s3_bucket.cur-bucket.0.id + report_bucket = var.report_s3_bucket != "" ? var.report_s3_bucket : local.cur_bucket } # The Cost and Usage report, create in any configuration @@ -29,7 +30,7 @@ resource "aws_cur_report_definition" "kubecost-cur" { report_versioning = "OVERWRITE_REPORT" } -# The bucket used by the Cost and Usage report, will be created in master/standalone setup +# The bucket used by the Cost and Usage report resource "aws_s3_bucket" "cur-bucket" { count = var.cur_s3_bucket != "" ? 
0 : 1 bucket = "${var.vpc_name}-kubecost-bucket" @@ -52,7 +53,7 @@ resource "aws_s3_bucket" "cur-bucket" { } -# The Policy attached to the Cost and Usage report bucket, Will attach permissions to each for master/slave account and allow permissions to root slave account so SA's can read/write to bucket +# The Policy attached to the Cost and Usage report bucket resource "aws_s3_bucket_policy" "cur-bucket-policy" { count = var.cur_s3_bucket != "" ? 0 : 1 bucket = aws_s3_bucket.cur-bucket[count.index].id @@ -89,55 +90,11 @@ resource "aws_s3_bucket_policy" "cur-bucket-policy" { "aws:SourceAccount" = local.account_id } } - }, - { - Sid = "Stmt1335892150623" - Effect = "Allow" - Principal = { - Service = "billingreports.amazonaws.com" - } - Action = ["s3:GetBucketAcl","s3:GetBucketPolicy"] - Resource = "arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}" - Condition = { - StringEquals = { - "aws:SourceArn" = "arn:aws:cur:us-east-1:${var.slave_account_id != "" ? var.slave_account_id : local.account_id}}:definition/*" - "aws:SourceAccount" = var.slave_account_id - } - } - }, - { - Sid = "Stmt1335892526598" - Effect = "Allow" - Principal = { - Service = "billingreports.amazonaws.com" - } - Action = "s3:PutObject" - Resource = "arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}/*" - Condition = { - StringEquals = { - "aws:SourceArn" = "arn:aws:cur:us-east-1:${var.slave_account_id != "" ? var.slave_account_id : local.account_id}:definition/*" - "aws:SourceAccount" = local.account_id - } - } - }, - { - Sid = "Stmt1335892526597" - Effect = "Allow" - Principal = { - AWS = "arn:aws:iam::${var.slave_account_id != "" ? 
var.slave_account_id : local.account_id}:root" - } - Action = ["s3:GetBucketAcl","s3:GetBucketPolicy","s3:PutObject","s3:ListBucket","s3:GetObject","s3:DeleteObject","s3:PutObjectAcl"] - Resource = ["arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}/*","arn:aws:s3:::${aws_s3_bucket.cur-bucket[count.index].id}"] } ] }) } - - - - - # An IAM user used to connect kubecost to CUR/Glue/Athena, not used for SA setup #resource "aws_iam_user" "kubecost-user" { # name = "${var.vpc_name}-kubecost-user" @@ -153,7 +110,7 @@ resource "aws_s3_bucket_policy" "cur-bucket-policy" { # user = aws_iam_user.kubecost-user.name #} -# Policy to attach to the user, will attach permissions to terraform created bucket if master/standalone or to specified bucket if slave +# Policy to attach to the user resource "aws_iam_policy" "thanos-user-policy" { name = "${var.vpc_name}-Kubecost-CUR-policy" path = "/" @@ -185,16 +142,16 @@ resource "aws_iam_policy" "thanos-user-policy" { { Sid = "S3ReadAccessToAwsBillingData" Effect = "Allow" - Action = ["s3:Get*","s3:List*"] + Action = ["s3:*"] Resource = ["arn:aws:s3:::${local.cur_bucket}","arn:aws:s3:::${local.cur_bucket}/*"] } ] }) } -# Policy to attach to the user, will attach permissions to terraform created bucket if master/standalone or to specified bucket if slave -resource "aws_iam_policy" "kubecost-user-policy" { - name = "${var.vpc_name}-Kubecost-Thanos-policy" +# Policy to attach to the reports user +resource "aws_iam_policy" "report-user-policy" { + name = "${var.vpc_name}-Kubecost-report-policy" path = "/" description = "Policy for Thanos to have access to centralized bucket." 
@@ -207,28 +164,13 @@ resource "aws_iam_policy" "kubecost-user-policy" { Sid = "Statement", Effect = "Allow", Action = ["s3:ListBucket","s3:GetObject","s3:DeleteObject","s3:PutObject","s3:PutObjectAcl"], - Resource = ["arn:aws:s3:::${local.cur_bucket}/*","arn:aws:s3:::${local.cur_bucket}"] + Resource = ["arn:aws:s3:::${local.report_bucket}/*","arn:aws:s3:::${local.report_bucket}"] } ] }) } - -# Policy attachment of the kubecost user policy to the kubecost user -#resource "aws_iam_user_policy_attachment" "kubecost-user-policy-attachment" { -# user = aws_iam_user.kubecost-user.name -# policy_arn = aws_iam_policy.kubecost-user-policy.arn -#} - - - - - - - - - -# Role for the glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified master bucket +# Role for the glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified resource "aws_iam_role" "glue-crawler-role" { name = "AWSCURCrawlerComponentFunction-${var.vpc_name}" managed_policy_arns = ["arn:aws:iam::aws:policy/service-role/AWSGlueServiceRole"] @@ -326,7 +268,7 @@ resource "aws_iam_role" "cur-initializer-lambda-role" { } } -# Role for the s3 notification lambda, used for every configuration, s3 bucket will either be from terraform, or specified master bucket +# Role for the s3 notification lambda, used for every configuration, s3 bucket will either be from terraform, or specified bucket resource "aws_iam_role" "cur-s3-notification-lambda-role" { name = "AWSS3CURLambdaExecutor-${var.vpc_name}" @@ -370,7 +312,7 @@ resource "aws_glue_catalog_database" "cur-glue-database" { name = "athenacurcfn_${var.vpc_name}" } -# Glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified master bucket +# Glue crawler, used for every configuration, s3 bucket will either be from terraform, or specified bucket resource "aws_glue_crawler" "cur-glue-crawler" { database_name = 
aws_glue_catalog_database.cur-glue-database.name name = "${var.vpc_name}-AWSCURCrawler" @@ -383,7 +325,7 @@ resource "aws_glue_crawler" "cur-glue-crawler" { } } -# Glue catalog table, used for every configuration, s3 bucket will either be from terraform, or specified master bucket +# Glue catalog table, used for every configuration, s3 bucket will either be from terraform, or specified bucket resource "aws_glue_catalog_table" "cur-glue-catalog" { database_name = aws_glue_catalog_database.cur-glue-database.name name = "${var.vpc_name}-cost_and_usage_data_status" @@ -421,7 +363,7 @@ resource "aws_lambda_function" "cur-initializer-lambda" { runtime = "nodejs12.x" } -# permissions for lambda, used for every configuration, s3 bucket will either be from terraform, or specified master bucket +# permissions for lambda, used for every configuration, s3 bucket will either be from terraform, or specified bucket resource "aws_lambda_permission" "cur-initializer-lambda-permission" { action = "lambda:InvokeFunction" function_name = aws_lambda_function.cur-initializer-lambda.function_name @@ -445,4 +387,3 @@ resource "aws_lambda_function" "cur-s3-notification-lambda" { } } } - diff --git a/tf_files/aws/kubecost/sample.tfvars b/tf_files/aws/kubecost/sample.tfvars index 540bd88a1..659136efa 100644 --- a/tf_files/aws/kubecost/sample.tfvars +++ b/tf_files/aws/kubecost/sample.tfvars @@ -3,12 +3,8 @@ #The name of the VPC to bring these resources up in vpc_name = "" -#This is used if the resource is set up as a secondary node -parent_account_id = "" - #The S3 bucket in which to store the generated Cost and Usage report cur_s3_bucket = "" -#This is used if the resource is set up as a primary node. 
It specifies the account ID for the linked secondary node -slave_account_id = "" - +#The S3 bucket in which to store the kubecost daily reports +report_s3_bucket = "" diff --git a/tf_files/aws/kubecost/variables.tf b/tf_files/aws/kubecost/variables.tf index 786c82083..d1335847a 100644 --- a/tf_files/aws/kubecost/variables.tf +++ b/tf_files/aws/kubecost/variables.tf @@ -2,18 +2,10 @@ variable "vpc_name" { default = "" } -# If slave setup - -variable "parent_account_id" { - default = "" -} - variable "cur_s3_bucket" { default = "" } -# If master setup - -variable "slave_account_id" { +variable "report_s3_bucket" { default = "" } From 4c14e115966616f3fa38a07f375a7784d01af29c Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 4 May 2023 11:18:00 -0600 Subject: [PATCH 126/362] adding pod disruption budgets for every service that will get applied via the roll all or by running kube-setup-pdb. The pdb will only be applied to the cluster if the service in question has more than 1 replica. 
(#2239) --- gen3/bin/kube-roll-all.sh | 1 + gen3/bin/kube-setup-pdb.sh | 38 +++++++++++++++++++ .../pod-disruption-budget/ambassador.yaml | 9 +++++ .../pod-disruption-budget/arborist.yaml | 9 +++++ .../pod-disruption-budget/argo-wrapper.yaml | 9 +++++ .../pod-disruption-budget/arranger.yaml | 9 +++++ .../pod-disruption-budget/audit-service.yaml | 9 +++++ .../pod-disruption-budget/aws-es-proxy.yaml | 9 +++++ .../pod-disruption-budget/dicom-server.yaml | 9 +++++ .../pod-disruption-budget/dicom-viewer.yaml | 9 +++++ .../services/pod-disruption-budget/fence.yaml | 9 +++++ .../services/pod-disruption-budget/guppy.yaml | 9 +++++ .../pod-disruption-budget/hatchery.yaml | 9 +++++ .../pod-disruption-budget/indexd.yaml | 9 +++++ .../manifestservice.yaml | 9 +++++ .../pod-disruption-budget/metadata.yaml | 9 +++++ .../pod-disruption-budget/peregrine.yaml | 9 +++++ .../pod-disruption-budget/pidgin.yaml | 9 +++++ .../pod-disruption-budget/portal.yaml | 9 +++++ .../pod-disruption-budget/requestor.yaml | 9 +++++ .../pod-disruption-budget/revproxy.yaml | 9 +++++ .../pod-disruption-budget/sheepdog.yaml | 9 +++++ .../pod-disruption-budget/ssjdispatcher.yaml | 9 +++++ kube/services/pod-disruption-budget/wts.yaml | 9 +++++ 24 files changed, 237 insertions(+) create mode 100644 gen3/bin/kube-setup-pdb.sh create mode 100644 kube/services/pod-disruption-budget/ambassador.yaml create mode 100644 kube/services/pod-disruption-budget/arborist.yaml create mode 100644 kube/services/pod-disruption-budget/argo-wrapper.yaml create mode 100644 kube/services/pod-disruption-budget/arranger.yaml create mode 100644 kube/services/pod-disruption-budget/audit-service.yaml create mode 100644 kube/services/pod-disruption-budget/aws-es-proxy.yaml create mode 100644 kube/services/pod-disruption-budget/dicom-server.yaml create mode 100644 kube/services/pod-disruption-budget/dicom-viewer.yaml create mode 100644 kube/services/pod-disruption-budget/fence.yaml create mode 100644 
kube/services/pod-disruption-budget/guppy.yaml create mode 100644 kube/services/pod-disruption-budget/hatchery.yaml create mode 100644 kube/services/pod-disruption-budget/indexd.yaml create mode 100644 kube/services/pod-disruption-budget/manifestservice.yaml create mode 100644 kube/services/pod-disruption-budget/metadata.yaml create mode 100644 kube/services/pod-disruption-budget/peregrine.yaml create mode 100644 kube/services/pod-disruption-budget/pidgin.yaml create mode 100644 kube/services/pod-disruption-budget/portal.yaml create mode 100644 kube/services/pod-disruption-budget/requestor.yaml create mode 100644 kube/services/pod-disruption-budget/revproxy.yaml create mode 100644 kube/services/pod-disruption-budget/sheepdog.yaml create mode 100644 kube/services/pod-disruption-budget/ssjdispatcher.yaml create mode 100644 kube/services/pod-disruption-budget/wts.yaml diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 68a0bd47d..552c27708 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -268,6 +268,7 @@ if [[ "$GEN3_ROLL_FAST" != "true" ]]; then # gen3 kube-setup-networkpolicy disable & gen3 kube-setup-networkpolicy & + gen3 kube-setup-pdb else gen3_log_info "roll fast mode - skipping k8s base services and netpolicy setup" fi diff --git a/gen3/bin/kube-setup-pdb.sh b/gen3/bin/kube-setup-pdb.sh new file mode 100644 index 000000000..e29b2e1e6 --- /dev/null +++ b/gen3/bin/kube-setup-pdb.sh @@ -0,0 +1,38 @@ +#!/bin/bash +# +# Apply pods diruption budgets to the core services of the commons +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +serverVersion="$(g3kubectl version -o json | jq -r '.serverVersion.major + "." + .serverVersion.minor' | head -c4)" +echo "Server version $serverVersion" +if [ "$serverVersion" \< "1.21" ]; then + gen3_log_info "kube-setup-pdb" "K8s server version $serverVersion does not support pod disruption budgets. 
Server must be version 1.21 or higher" + exit 0 +fi + +deployments=$(kubectl get deployments | awk '{print $1}' | tail -n +2) + +if [[ "$(g3k_manifest_lookup .global.pdb)" == "on" ]]; then + for deployment in $deployments + do + replicas=$(kubectl get deployment $deployment -o=jsonpath='{.spec.replicas}') + if [[ "$replicas" -gt "1" ]]; then + echo "There were $replicas replicas" + service=$(echo "$deployment" | awk -F '-' '{print $1}') + echo "We are on the $service service" + filePath="${GEN3_HOME}/kube/services/pod-disruption-budget/${service}.yaml" + if [[ -f "$filePath" ]]; then + g3kubectl apply -f "$filePath" + else + echo "No PDB file found for service $service" + fi + else + echo "Skipping PDB for deployment $deployment because it has only 1 replica" + fi + done + else + echo "You need to set pdb = 'on' in the manifest.json" +fi \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/ambassador.yaml b/kube/services/pod-disruption-budget/ambassador.yaml new file mode 100644 index 000000000..72a02e175 --- /dev/null +++ b/kube/services/pod-disruption-budget/ambassador.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: ambassador-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: ambassador \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/arborist.yaml b/kube/services/pod-disruption-budget/arborist.yaml new file mode 100644 index 000000000..3b736a8e0 --- /dev/null +++ b/kube/services/pod-disruption-budget/arborist.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: arborist-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: arborist \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/argo-wrapper.yaml b/kube/services/pod-disruption-budget/argo-wrapper.yaml new file mode 100644 index 000000000..bae800886 --- /dev/null +++ b/kube/services/pod-disruption-budget/argo-wrapper.yaml @@ -0,0 +1,9 
@@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: argo-wrapper-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: argo-wrapper \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/arranger.yaml b/kube/services/pod-disruption-budget/arranger.yaml new file mode 100644 index 000000000..b98dc1ea5 --- /dev/null +++ b/kube/services/pod-disruption-budget/arranger.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: arranger-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: arranger \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/audit-service.yaml b/kube/services/pod-disruption-budget/audit-service.yaml new file mode 100644 index 000000000..1ee8054c9 --- /dev/null +++ b/kube/services/pod-disruption-budget/audit-service.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: audit-service-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: audit-service \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/aws-es-proxy.yaml b/kube/services/pod-disruption-budget/aws-es-proxy.yaml new file mode 100644 index 000000000..fc844a0f9 --- /dev/null +++ b/kube/services/pod-disruption-budget/aws-es-proxy.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: esproxy-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: esproxy \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/dicom-server.yaml b/kube/services/pod-disruption-budget/dicom-server.yaml new file mode 100644 index 000000000..5755a0275 --- /dev/null +++ b/kube/services/pod-disruption-budget/dicom-server.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: dicom-server-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: dicom-server \ No newline at end of file diff --git 
a/kube/services/pod-disruption-budget/dicom-viewer.yaml b/kube/services/pod-disruption-budget/dicom-viewer.yaml new file mode 100644 index 000000000..8b2717a19 --- /dev/null +++ b/kube/services/pod-disruption-budget/dicom-viewer.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: dicom-viewer-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: dicom-viewer \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/fence.yaml b/kube/services/pod-disruption-budget/fence.yaml new file mode 100644 index 000000000..62a26135f --- /dev/null +++ b/kube/services/pod-disruption-budget/fence.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: fence-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: fence \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/guppy.yaml b/kube/services/pod-disruption-budget/guppy.yaml new file mode 100644 index 000000000..7850a4a68 --- /dev/null +++ b/kube/services/pod-disruption-budget/guppy.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: guppy-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: guppy \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/hatchery.yaml b/kube/services/pod-disruption-budget/hatchery.yaml new file mode 100644 index 000000000..9ddae7bdf --- /dev/null +++ b/kube/services/pod-disruption-budget/hatchery.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: hatchery-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: hatchery \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/indexd.yaml b/kube/services/pod-disruption-budget/indexd.yaml new file mode 100644 index 000000000..f0e33e774 --- /dev/null +++ b/kube/services/pod-disruption-budget/indexd.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: 
indexd-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: indexd \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/manifestservice.yaml b/kube/services/pod-disruption-budget/manifestservice.yaml new file mode 100644 index 000000000..af0392992 --- /dev/null +++ b/kube/services/pod-disruption-budget/manifestservice.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: manifestservice-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: manifestservice \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/metadata.yaml b/kube/services/pod-disruption-budget/metadata.yaml new file mode 100644 index 000000000..370977eb0 --- /dev/null +++ b/kube/services/pod-disruption-budget/metadata.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: metadata-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: metadata \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/peregrine.yaml b/kube/services/pod-disruption-budget/peregrine.yaml new file mode 100644 index 000000000..82bee5ef3 --- /dev/null +++ b/kube/services/pod-disruption-budget/peregrine.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: peregrine-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: peregrine \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/pidgin.yaml b/kube/services/pod-disruption-budget/pidgin.yaml new file mode 100644 index 000000000..975fc172f --- /dev/null +++ b/kube/services/pod-disruption-budget/pidgin.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: pidgin-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: pidgin \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/portal.yaml b/kube/services/pod-disruption-budget/portal.yaml new file mode 100644 index 
000000000..04a91c123 --- /dev/null +++ b/kube/services/pod-disruption-budget/portal.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: portal-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: portal \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/requestor.yaml b/kube/services/pod-disruption-budget/requestor.yaml new file mode 100644 index 000000000..c342de5ac --- /dev/null +++ b/kube/services/pod-disruption-budget/requestor.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: requestor-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: requestor \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/revproxy.yaml b/kube/services/pod-disruption-budget/revproxy.yaml new file mode 100644 index 000000000..7632f6375 --- /dev/null +++ b/kube/services/pod-disruption-budget/revproxy.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: revproxy-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: revproxy \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/sheepdog.yaml b/kube/services/pod-disruption-budget/sheepdog.yaml new file mode 100644 index 000000000..7cf4d6c18 --- /dev/null +++ b/kube/services/pod-disruption-budget/sheepdog.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: sheepdog-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: sheepdog \ No newline at end of file diff --git a/kube/services/pod-disruption-budget/ssjdispatcher.yaml b/kube/services/pod-disruption-budget/ssjdispatcher.yaml new file mode 100644 index 000000000..9c95cfc27 --- /dev/null +++ b/kube/services/pod-disruption-budget/ssjdispatcher.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: ssjdispatcher-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: ssjdispatcher \ No 
newline at end of file diff --git a/kube/services/pod-disruption-budget/wts.yaml b/kube/services/pod-disruption-budget/wts.yaml new file mode 100644 index 000000000..47eb1b9bf --- /dev/null +++ b/kube/services/pod-disruption-budget/wts.yaml @@ -0,0 +1,9 @@ +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: wts-pdb +spec: + minAvailable: 1 + selector: + matchLabels: + app: wts \ No newline at end of file From c3e2c93f454c54c9bf01afa5c8f60431b962e51e Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Fri, 5 May 2023 09:51:23 -0500 Subject: [PATCH 127/362] add *.gen3.org to web_wildcard_whitelist add *.gen3.org to web_wildcard_whitelist --- files/squid_whitelist/web_whitelist | 1 - files/squid_whitelist/web_wildcard_whitelist | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index f37180df8..bcc5eb155 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -68,7 +68,6 @@ ftp.usf.edu ftp.ussg.iu.edu fmwww.bc.edu gcr.io -gen3.org get.helm.sh git.io go.googlesource.com diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 1421f6d5d..a8c765814 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -40,6 +40,7 @@ .erlang-solutions.com .extjs.com .fedoraproject.org +.gen3.org .genome.jp .github.com .githubusercontent.com From 3e9352b5bf2e9181a4114cecf100e37fc95c4b3f Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 9 May 2023 09:02:52 -0500 Subject: [PATCH 128/362] Switch gen3Env to hostname instead of environment (#2242) --- Jenkinsfile | 33 +++++++++++++++++++ kube/services/jobs/etl-cronjob.yaml | 2 +- kube/services/jobs/fence-db-migrate-job.yaml | 10 ------ .../jobs/fence-visa-update-cronjob.yaml | 2 +- 
kube/services/jobs/fence-visa-update-job.yaml | 2 +- kube/services/jobs/s3sync-cronjob.yaml | 2 +- kube/services/jobs/usersync-job.yaml | 4 +-- 7 files changed, 39 insertions(+), 16 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 7f70aeedf..4e3470ded 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -60,7 +60,35 @@ spec: operator: In values: - on-demand + initContainers: + - name: wait-for-jenkins-connection + image: quay.io/cdis/gen3-ci-worker:master + command: ["/bin/sh","-c"] + args: ["while [ $(curl -sw '%{http_code}' http://jenkins-master-service:8080/tcpSlaveAgentListener/ -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for jenkins connection ...'; done"] containers: + - name: jnlp + command: ["/bin/sh","-c"] + args: ["sleep 30; /usr/local/bin/jenkins-agent"] + resources: + requests: + cpu: 500m + memory: 500Mi + ephemeral-storage: 500Mi + - name: selenium + image: selenium/standalone-chrome:112.0 + imagePullPolicy: Always + ports: + - containerPort: 4444 + readinessProbe: + httpGet: + path: /status + port: 4444 + timeoutSeconds: 60 + resources: + requests: + cpu: 500m + memory: 500Mi + ephemeral-storage: 500Mi - name: shell image: quay.io/cdis/gen3-ci-worker:master imagePullPolicy: Always @@ -68,6 +96,11 @@ spec: - sleep args: - infinity + resources: + requests: + cpu: 0.2 + memory: 200Mi + ephemeral-storage: 200Mi env: - name: AWS_DEFAULT_REGION value: us-east-1 diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml index c68fc9fd8..463fbfb2e 100644 --- a/kube/services/jobs/etl-cronjob.yaml +++ b/kube/services/jobs/etl-cronjob.yaml @@ -79,7 +79,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: environment + key: hostname volumeMounts: - name: "creds-volume" readOnly: true diff --git a/kube/services/jobs/fence-db-migrate-job.yaml b/kube/services/jobs/fence-db-migrate-job.yaml index 298d61a5d..53dda3e21 100644 --- a/kube/services/jobs/fence-db-migrate-job.yaml +++ b/kube/services/jobs/fence-db-migrate-job.yaml 
@@ -47,11 +47,6 @@ spec: GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| imagePullPolicy: Always env: - - name: gen3Env - valueFrom: - configMapKeyRef: - name: global - key: environment - name: JENKINS_HOME value: "" - name: GEN3_NOPROXY @@ -116,11 +111,6 @@ spec: GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| imagePullPolicy: Always env: - - name: gen3Env - valueFrom: - configMapKeyRef: - name: global - key: environment - name: JENKINS_HOME value: "" - name: GEN3_NOPROXY diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml index 9e8628b26..6c58ef291 100644 --- a/kube/services/jobs/fence-visa-update-cronjob.yaml +++ b/kube/services/jobs/fence-visa-update-cronjob.yaml @@ -73,7 +73,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: environment + key: hostname - name: FENCE_PUBLIC_CONFIG valueFrom: configMapKeyRef: diff --git a/kube/services/jobs/fence-visa-update-job.yaml b/kube/services/jobs/fence-visa-update-job.yaml index 45342c0d0..973ba2e3d 100644 --- a/kube/services/jobs/fence-visa-update-job.yaml +++ b/kube/services/jobs/fence-visa-update-job.yaml @@ -67,7 +67,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: environment + key: hostname - name: FENCE_PUBLIC_CONFIG valueFrom: configMapKeyRef: diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml index c0fb8196e..f05ab518a 100644 --- a/kube/services/jobs/s3sync-cronjob.yaml +++ b/kube/services/jobs/s3sync-cronjob.yaml @@ -57,7 +57,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: environment + key: hostname - name: SOURCE_BUCKET GEN3_SOURCE_BUCKET - name: TARGET_BUCKET diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml index 384f68b0d..8f148a3b0 100644 --- a/kube/services/jobs/usersync-job.yaml +++ b/kube/services/jobs/usersync-job.yaml @@ -125,7 +125,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: environment + 
key: hostname - name: FENCE_PUBLIC_CONFIG valueFrom: configMapKeyRef: @@ -294,7 +294,7 @@ spec: valueFrom: configMapKeyRef: name: global - key: environment + key: hostname - name: slackWebHook valueFrom: configMapKeyRef: From a3835adca7f3d464ad7177a0b0ec04147557d3b2 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 10 May 2023 12:50:04 -0400 Subject: [PATCH 129/362] Feat/datadog aurora monitoring (#2230) * Added annotations to get argo controller to send metrics to Datadog * Initial commit of the script to set up DB monitoring for Aurora. Need some guidance on how we want to manage config. * Starting in on the password * Mostly finished now, but we'll need to test the full end-to-end process * Also had to update datadog files for the argo workflow metrics, as we'll need to modify the Helm release * Cleaned up some debugging items. We'll have to redo the method for connecting to the DBs, we're going to try and read from g3auto instead. * Need to run final tests. * Committing work to switch to master. * Switching over to set up revproy * Finalizing the ArgoCD setup script. * Fixed a few errors * Finalized the kube-setup-aurora-monitoring script. * Added logic for calling kube-setup-aurora-monitoring to kube-setup-datadog, so that it will get called even if we do a new setup. 
* Update values.yaml --- gen3/bin/kube-setup-aurora-monitoring.sh | 169 +++++++++++++++++++++ gen3/bin/kube-setup-datadog.sh | 36 ++++- kube/services/datadog/datadog_db_user.json | 4 + kube/services/datadog/postgres.yaml | 8 + kube/services/datadog/values.yaml | 3 + 5 files changed, 219 insertions(+), 1 deletion(-) create mode 100644 gen3/bin/kube-setup-aurora-monitoring.sh create mode 100644 kube/services/datadog/datadog_db_user.json create mode 100644 kube/services/datadog/postgres.yaml diff --git a/gen3/bin/kube-setup-aurora-monitoring.sh b/gen3/bin/kube-setup-aurora-monitoring.sh new file mode 100644 index 000000000..e8f4f672d --- /dev/null +++ b/gen3/bin/kube-setup-aurora-monitoring.sh @@ -0,0 +1,169 @@ +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +databaseArray=() +databaseFarmArray=() + +# This function is going to retrieve and return all the top-level entries from creds.json, that has the db items we want. +# This way, we can use this information while we're creating schemas and the like +get_all_dbs() { + databases=$(jq 'to_entries[] | select (.value.db_password) | .key' $(gen3_secrets_folder)/creds.json) + + OLD_IFS=$IFS + IFS=$'\n' databaseArray=($databases) + IFS=$OLD_IFS +} + +get_all_dbs_db_farm() { + databases=$(jq 'to_entries[] | .key' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json) + + OLD_IFS=$IFS + IFS=$'\n' databaseFarmArray=($databases) + IFS=$OLD_IFS +} + +create_new_datadog_user() { + # Generate a new password for the datadog user in psql + datadogPsqlPassword=$(random_alphanumeric) + + # update creds.json + if [ ! -d "$(gen3_secrets_folder)/datadog" ] + then + mkdir "$(gen3_secrets_folder)/datadog" + fi + + if [ ! 
-s "$(gen3_secrets_folder)/datadog/datadog_db_users" ] + then + echo "{}" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json" + fi + + output=$(jq --arg host "$1" --arg password "$datadogPsqlPassword" '.[$host].datadog_db_password=$password' "$(gen3_secrets_folder)/datadog/datadog_db_users.json") + echo "$output" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json" + + # Instead of grabbing username, password, and all that, and doing our connection, we'll just figure out + # which short name (i.e., server1, server2, etc) corresponds to our host, and connect that way. + # Saves a few lines of code. + shortname=$(jq --arg host "$1" 'to_entries[] | select (.value.db_host == $host) | .key' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') + + # Create the Datadog user in the database + if gen3 psql $shortname -c "SELECT 1 FROM pg_roles WHERE rolname='datadog'" | grep -q 1; + then + gen3 psql $shortname -c "ALTER USER datadog WITH password '$datadogPsqlPassword';" + else + gen3 psql $shortname -c "CREATE USER datadog WITH password '$datadogPsqlPassword';" + fi + + echo $datadogPsqlPassword +} + +get_datadog_db_password() { + # Create the Datadog user + datadogPsqlPassword="$(jq --arg host "$1" '.[$host].datadog_db_password' < $(gen3_secrets_folder)/datadog/datadog_db_users.json)" + if [[ -z "$datadogPsqlPassword" ]] + then + datadogPsqlPassword=$(create_new_datadog_user $1) + fi + + echo $datadogPsqlPassword +} + +create_schema_and_function() { + svc=$(echo $1 | tr -d '"') + host=$(jq --arg service "$svc" '.[$service].db_host' $(gen3_secrets_folder)/creds.json | tr -d '"') + database=$(jq --arg service "$svc" '.[$service].db_database' $(gen3_secrets_folder)/creds.json | tr -d '"') + + username=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') + password=$(jq --arg host "$host" 'map(select(.db_host==$host))[0] | .db_password' 
$(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') + + ddPass=$(get_datadog_db_password $host) + + PGPASSWORD=$password psql -h $host -U $username -d $database -t < /dev/null +then + gen3_log_info "We detected an ArgoCD application named 'datadog-application,' so we're modifying that" + + patch=$(yq -n --yaml-output --arg confd "$confd" '.spec.source.helm.values = $confd') + + echo "$patch" > /tmp/confd.yaml + + kubectl patch applications.argoproj.io datadog-application --type merge -n argocd --patch-file /tmp/confd.yaml + +else + gen3_log_info "We didn't detect an ArgoCD application named 'datadog-application,' so we're going to reinstall the DD Helm chart" + + (cat kube/services/datadog/values.yaml | yq --arg endpoints "$postgresString" --yaml-output '.clusterAgent.confd."postgres.yaml" = $endpoints | .clusterChecksRunner.enabled = true') > $(gen3_secrets_folder)/datadog/datadog_values.yaml + helm repo add datadog https://helm.datadoghq.com --force-update 2> >(grep -v 'This is insecure' >&2) + helm repo update 2> >(grep -v 'This is insecure' >&2) + helm upgrade --install datadog -f "$(gen3_secrets_folder)/datadog/datadog_values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2) +fi \ No newline at end of file diff --git a/gen3/bin/kube-setup-datadog.sh b/gen3/bin/kube-setup-datadog.sh index 172d8c8da..3ff5d2e2b 100644 --- a/gen3/bin/kube-setup-datadog.sh +++ b/gen3/bin/kube-setup-datadog.sh @@ -26,7 +26,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then g3kubectl delete namespace datadog g3kubectl create namespace datadog fi - # create namespace if it doens't exist + # create namespace if it doesn't exist if (! 
g3kubectl get namespace datadog > /dev/null 2>&1); then gen3_log_info "Creating namespace datadog" g3kubectl create namespace datadog @@ -53,6 +53,40 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then else helm upgrade --install datadog -f "$GEN3_HOME/kube/services/datadog/values.yaml" datadog/datadog -n datadog --version 3.6.4 2> >(grep -v 'This is insecure' >&2) fi + + # Check the manifest to see if we want to set up database monitoring + # Get the name of the cluster + # Run the command + + if g3k_manifest_lookup .datadog.db_monitoring_enabled &> /dev/null; then + gen3_log_info "Detected that this commons is using database monitoring. Setting that up now." + clusters=$(aws rds describe-db-clusters --query "DBClusters[].DBClusterIdentifier" --output text) + clusterArray=($clusters) + + for i in "${!clusterArray[@]}"; do + echo "$((i+1)). ${clusterArray[i]}" + done + + selected="false" + selection="" + + until [ $selected == "true" ] + do + read -p "Enter the number of the cluster you want to monitor (1-${#clusterArray[@]}): " num + if [[ "$num" =~ ^[0-9]+$ ]] && ((num >= 1 && num <= ${#clusterArray[@]})); then + echo "You entered: $num" + selected="true" + selection=${clusterArray[$num - 1]} + else + echo "Invalid input: $num" + fi + done + + gen3 kube-setup-aurora-monitoring "$selection" + else + gen3_log_info "No database monitoring detected. We're done here." 
+ fi + ) else gen3_log_info "kube-setup-datadog exiting - datadog already deployed, use --force to redeploy" diff --git a/kube/services/datadog/datadog_db_user.json b/kube/services/datadog/datadog_db_user.json new file mode 100644 index 000000000..0eca1be9f --- /dev/null +++ b/kube/services/datadog/datadog_db_user.json @@ -0,0 +1,4 @@ +{ + "datadog_db_user": "datadog", + "datadog_db_password": null +} \ No newline at end of file diff --git a/kube/services/datadog/postgres.yaml b/kube/services/datadog/postgres.yaml new file mode 100644 index 000000000..f85dc0970 --- /dev/null +++ b/kube/services/datadog/postgres.yaml @@ -0,0 +1,8 @@ +cluster_check: true +init_config: +instances: + - dbm: true + host: + port: 5432 + username: datadog + password: \ No newline at end of file diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 3c5a79e4c..6c7df39b4 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -10,6 +10,9 @@ datadog: useHostPort: true nonLocalTraffic: true + #This is used to configure a lot of checks that Datadog does. 
Normally, we would annotate a service, but since we + #use aurora, we'll have to configure from confd instead + #Enables Optional Universal Service Monitoring ## ref: https://docs.datadoghq.com/tracing/universal_service_monitoring/?tab=helm serviceMonitoring: From 70f4bc96e7f2c6453d923c379143072a8aeeb482 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 11 May 2023 10:56:09 -0400 Subject: [PATCH 130/362] Removed a hard-coded file path, and changed the connection to create the datadog user so that it uses Gen3Secrets, rather than gen3 psql (#2244) --- gen3/bin/kube-setup-aurora-monitoring.sh | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/gen3/bin/kube-setup-aurora-monitoring.sh b/gen3/bin/kube-setup-aurora-monitoring.sh index e8f4f672d..5029a87ca 100644 --- a/gen3/bin/kube-setup-aurora-monitoring.sh +++ b/gen3/bin/kube-setup-aurora-monitoring.sh @@ -40,17 +40,15 @@ create_new_datadog_user() { output=$(jq --arg host "$1" --arg password "$datadogPsqlPassword" '.[$host].datadog_db_password=$password' "$(gen3_secrets_folder)/datadog/datadog_db_users.json") echo "$output" > "$(gen3_secrets_folder)/datadog/datadog_db_users.json" - # Instead of grabbing username, password, and all that, and doing our connection, we'll just figure out - # which short name (i.e., server1, server2, etc) corresponds to our host, and connect that way. - # Saves a few lines of code. 
- shortname=$(jq --arg host "$1" 'to_entries[] | select (.value.db_host == $host) | .key' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') + username=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_username' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') + password=$(jq --arg host "$1" 'map(select(.db_host==$host))[0] | .db_password' $(gen3_secrets_folder)/g3auto/dbfarm/servers.json | tr -d '"') # Create the Datadog user in the database - if gen3 psql $shortname -c "SELECT 1 FROM pg_roles WHERE rolname='datadog'" | grep -q 1; - then - gen3 psql $shortname -c "ALTER USER datadog WITH password '$datadogPsqlPassword';" + if PGPASSWORD=$password psql -h "$1" -U "$username" -c "SELECT 1 FROM pg_roles WHERE rolname='datadog'" | grep -q 1; + then + PGPASSWORD=$password psql -h "$1" -U "$username" -c "ALTER USER datadog WITH password '$datadogPsqlPassword';" else - gen3 psql $shortname -c "CREATE USER datadog WITH password '$datadogPsqlPassword';" + PGPASSWORD=$password psql -h "$1" -U "$username" -c "CREATE USER datadog WITH password '$datadogPsqlPassword';" fi echo $datadogPsqlPassword @@ -141,7 +139,7 @@ for instance in "${instances[@]}" do instanceArray=($instance) datadogUserPassword=$(jq --arg instance "$clusterEndpoint" '.[$instance].datadog_db_password' $(gen3_secrets_folder)/datadog/datadog_db_users.json | tr -d '"') - postgresString+=$(cat /home/aidan/cloud-automation/kube/services/datadog/postgres.yaml | yq --arg url ${instanceArray[0]} --yaml-output '.instances[0].host = $url' | yq --arg password $datadogUserPassword --yaml-output '.instances[0].password = $password') + postgresString+=$(cat ${GEN3_HOME}/kube/services/datadog/postgres.yaml | yq --arg url ${instanceArray[0]} --yaml-output '.instances[0].host = $url' | yq --arg password $datadogUserPassword --yaml-output '.instances[0].password = $password') done confd=$(yq -n --yaml-output --arg postgres "$postgresString" '.clusterAgent.confd."postgres.yaml" = $postgres 
| .clusterChecksRunner.enabled = true') From fb5e771dc43138f0d2d3ecbf280400672b2086b2 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 11 May 2023 11:50:20 -0500 Subject: [PATCH 131/362] fix(ecr-containerd): Added containerd support for ECR commands (#2246) Co-authored-by: Edward Malinowski --- gen3/bin/ecr.sh | 54 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 23254c5de..8f4aeccb0 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -71,18 +71,34 @@ ecrReg="707767160287.dkr.ecr.us-east-1.amazonaws.com" # lib ------------------------------- gen3_ecr_login() { - if gen3_time_since ecr-login is 36000; then + if [[ -S /var/run/docker.sock ]]; then + if gen3_time_since ecr-login is 36000; then # re-authenticate every 10 hours - aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin "707767160287.dkr.ecr.us-east-1.amazonaws.com" 1>&2 || exit 1 + aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin "707767160287.dkr.ecr.us-east-1.amazonaws.com" 1>&2 || exit 1 + fi + elif [[ -S /var/run/containerd/containerd.sock ]]; then + gen3_log_info "Containerd found, logging in during each ctr command" + loginCommand="-u AWS:$(aws ecr get-login-password --region us-east-1)" + else + gen3_log_err "No container runtime found. 
Exiting" + exit 1 fi } gen3_quay_login() { if [[ -f ~/Gen3Secrets/quay/login ]]; then - if gen3_time_since quay-login is 36000; then - cat ~/Gen3Secrets/quay/login | docker login --username cdis+gen3 --password-stdin quay.io + if [[ -S /var/run/docker.sock ]]; then + if gen3_time_since quay-login is 36000; then + cat ~/Gen3Secrets/quay/login | docker login --username cdis+gen3 --password-stdin quay.io + fi + elif [[ -S /var/run/containerd/containerd.sock ]]; then + gen3_log_info "Containerd found, logging in during each ctr command" + loginCommand="-u \"cdis+gen3\":\"$(cat ~/Gen3Secrets/quay/login)\"" + else + gen3_log_err "No container runtime found. Exiting" + exit 1 fi - else + else gen3_log_err "Place credentials for the quay robot account (cdis+gen3) in this file ~/Gen3Secrets/quay/login" exit 1 fi @@ -97,7 +113,8 @@ gen3_quay_login() { gen3_ecr_copy_image() { local srcTag="$1" local destTag="$2" - if [[ "$destTag" == *"quay.io"* ]]; then + loginCommand="" + if [[ "$destTag" == *"quay.io"* ]]; then gen3_quay_login || return 1 else gen3_ecr_login || return 1 @@ -108,12 +125,23 @@ gen3_ecr_copy_image() { fi shift shift - (docker pull "$srcTag" && \ - docker tag "$srcTag" "$destTag" && \ - docker push "$destTag" - ) || return 1 + if [[ -S /var/run/docker.sock ]]; then + (docker pull "$srcTag" && \ + docker tag "$srcTag" "$destTag" && \ + docker push "$destTag" + ) || return 1 + docker image rm "$srcTag" "$destTag" + elif [[ -S /var/run/containerd/containerd.sock ]]; then + (ctr image pull "$srcTag" $loginCommand && \ + ctr image tag "$srcTag" "$destTag" && \ + ctr image push "$destTag" $loginCommand + ) || return 1 + ctr image rm "$srcTag" "$destTag" + else + gen3_log_err "No container runtime found. 
Exiting" + exit 1 + fi # save disk space - docker image rm "$srcTag" "$destTag" return 0 } @@ -178,7 +206,7 @@ gen3_ecr_update_all() { echo $repoList for repo in $repoList; do gen3_ecr_update_policy $repo - done + done } # Check if the Quay image exists in ECR repository @@ -203,7 +231,7 @@ gen3_ecr_describe_image() { # @param repoName gen3_ecr_create_repo() { local repoName="gen3/$1" - aws ecr create-repository --repository-name ${repoName} --image-scanning-configuration scanOnPush=true + aws ecr create-repository --repository-name ${repoName} --image-scanning-configuration scanOnPush=true } From b7a3ca680aab7a4de50b779e538f40dec1bc53ec Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 11 May 2023 12:54:31 -0500 Subject: [PATCH 132/362] fix(s3-create): Updated s3 bucket resources to remove deprecated ACL flag (#2236) Co-authored-by: Edward Malinowski Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- tf_files/aws/modules/account-management-logs/cloud.tf | 1 - tf_files/aws/modules/common-logging/logging.tf | 1 - tf_files/aws/modules/management-logs/logging.tf | 1 - tf_files/aws/modules/s3-bucket/cloud.tf | 1 - tf_files/aws/modules/s3-logs/cloud.tf | 1 - tf_files/aws/modules/upload-data-bucket/s3.tf | 4 ---- tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf | 1 - 7 files changed, 10 deletions(-) diff --git a/tf_files/aws/modules/account-management-logs/cloud.tf b/tf_files/aws/modules/account-management-logs/cloud.tf index e8dbe191e..2b54389a8 100644 --- a/tf_files/aws/modules/account-management-logs/cloud.tf +++ b/tf_files/aws/modules/account-management-logs/cloud.tf @@ -10,7 +10,6 @@ resource "aws_s3_bucket" "management-logs_bucket" { bucket = "${var.account_name}-management-logs" - acl = "private" tags = { Environment = "${var.account_name}" diff --git a/tf_files/aws/modules/common-logging/logging.tf b/tf_files/aws/modules/common-logging/logging.tf index 98103f243..e9d292a4d 100644 --- a/tf_files/aws/modules/common-logging/logging.tf 
+++ b/tf_files/aws/modules/common-logging/logging.tf @@ -3,7 +3,6 @@ resource "aws_s3_bucket" "common_logging_bucket" { bucket = "${var.common_name}-logging" - acl = "private" tags = { Environment = "${var.common_name}" diff --git a/tf_files/aws/modules/management-logs/logging.tf b/tf_files/aws/modules/management-logs/logging.tf index 80b4a7931..ef7db77e8 100644 --- a/tf_files/aws/modules/management-logs/logging.tf +++ b/tf_files/aws/modules/management-logs/logging.tf @@ -3,7 +3,6 @@ resource "aws_s3_bucket" "management-logs_bucket" { bucket = "${var.log_bucket_name}" - acl = "private" tags = { Environment = "ALL" diff --git a/tf_files/aws/modules/s3-bucket/cloud.tf b/tf_files/aws/modules/s3-bucket/cloud.tf index 110b8fe4c..cc8cc3fba 100644 --- a/tf_files/aws/modules/s3-bucket/cloud.tf +++ b/tf_files/aws/modules/s3-bucket/cloud.tf @@ -16,7 +16,6 @@ module "cdis_s3_logs" { resource "aws_s3_bucket" "mybucket" { bucket = "${local.clean_bucket_name}" - acl = "private" server_side_encryption_configuration { rule { diff --git a/tf_files/aws/modules/s3-logs/cloud.tf b/tf_files/aws/modules/s3-logs/cloud.tf index 5f7a45705..e4569bd9e 100644 --- a/tf_files/aws/modules/s3-logs/cloud.tf +++ b/tf_files/aws/modules/s3-logs/cloud.tf @@ -10,7 +10,6 @@ terraform { resource "aws_s3_bucket" "log_bucket" { bucket = "${local.clean_bucket_name}" - acl = "log-delivery-write" server_side_encryption_configuration { rule { diff --git a/tf_files/aws/modules/upload-data-bucket/s3.tf b/tf_files/aws/modules/upload-data-bucket/s3.tf index af9cc19cf..041d5184f 100644 --- a/tf_files/aws/modules/upload-data-bucket/s3.tf +++ b/tf_files/aws/modules/upload-data-bucket/s3.tf @@ -3,7 +3,6 @@ resource "aws_s3_bucket" "data_bucket" { bucket = "${var.vpc_name}-data-bucket" - acl = "private" server_side_encryption_configuration { rule { @@ -56,9 +55,6 @@ resource "aws_s3_bucket_notification" "bucket_notification" { resource "aws_s3_bucket" "log_bucket" { bucket = "${var.vpc_name}-data-bucket-logs" - acl = 
"bucket-owner-full-control" #log-delivery-write - acl = "log-delivery-write" - server_side_encryption_configuration { rule { diff --git a/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf b/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf index b2ebdb15a..a0c59f439 100644 --- a/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf +++ b/tf_files/aws/modules/vpn_nlb_central_csoc/cloud.tf @@ -485,7 +485,6 @@ resource "aws_route53_record" "vpn-nlb" { resource "aws_s3_bucket" "vpn-certs-and-files" { bucket = "vpn-certs-and-files-${var.env_vpn_nlb_name}" - acl = "private" versioning { enabled = true From 943301f9fce057374fa5be376635ecccef00739c Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 11 May 2023 13:52:20 -0600 Subject: [PATCH 133/362] adding datadog networkpolicy to fix Datadog tracer for cohort middleware (#2245) --- .../gen3/services/datadog_netpolicy.yaml | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml diff --git a/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml b/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml new file mode 100644 index 000000000..87b71392f --- /dev/null +++ b/kube/services/netpolicy/gen3/services/datadog_netpolicy.yaml @@ -0,0 +1,25 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: netpolicy-datadog +spec: + podSelector: + matchExpressions: + - key: app + operator: In + values: + - cohort-middleware + ingress: + - from: + - ipBlock: + cidr: 0.0.0.0/0 + ports: + - port: 8126 + egress: + - to: + - namespaceSelector: + matchLabels: + app: datadog + policyTypes: + - Ingress + - Egress \ No newline at end of file From 31a46a35b6809c234298624b010ec9c34e513375 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 12 May 2023 11:47:40 -0500 Subject: [PATCH 134/362] making cronjob fence-visa-update optional (#2211) --- gen3/bin/kube-setup-fence.sh | 
18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index f69f80066..a521f3078 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -84,10 +84,20 @@ if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then gen3 job cron fence-cleanup-expired-ga4gh-info "*/5 * * * *" fi - # Setup visa update cronjob - if ! g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then - echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.07" - gen3 job cron fence-visa-update "30 * * * *" + # Extract the value of ENABLE_VISA_UPDATE_CRON from the configmap manifest-fence (fence-config-public.yaml) + ENABLE_VISA_UPDATE_CRON=$(kubectl get cm manifest-fence -o=jsonpath='{.data.fence-config-public\.yaml}' | yq -r .ENABLE_VISA_UPDATE_CRON) + + # Delete the fence-visa-update cronjob if ENABLE_VISA_UPDATE_CRON is set to false or not set or null in the configmap manifest-fence + if [[ "$ENABLE_VISA_UPDATE_CRON" == "false" ]] || [[ -z "$ENABLE_VISA_UPDATE_CRON" ]]; then + echo "Deleting fence-visa-update cronjob" + kubectl delete cronjob fence-visa-update + elif [[ "$ENABLE_VISA_UPDATE_CRON" == "true" ]]; then + if ! g3kubectl get cronjob fence-visa-update >/dev/null 2>&1; then + echo "fence-visa-update being added as a cronjob b/c fence >= 6.0.0 or 2022.07" + gen3 job cron fence-visa-update "30 * * * *" + fi + else + echo "ENABLE_VISA_UPDATE_CRON has an unexpected value in the configmap manifest-fence. Skipping fence-visa-update cronjob setup." 
fi fi From f227257bcb177757508e408547c822508f63590d Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 12 May 2023 09:51:24 -0700 Subject: [PATCH 135/362] Add jenkins-new-1 to CI pool (#2247) --- files/scripts/ci-env-pool-reset.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index a142fd7c2..1ceb0ccb5 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -29,13 +29,14 @@ source "${GEN3_HOME}/gen3/gen3setup.sh" cat - > jenkins-envs-services.txt < jenkins-envs-releases.txt < Date: Fri, 12 May 2023 14:08:47 -0700 Subject: [PATCH 136/362] Add containerd support for jenkins worker (#2248) * Add containerd support for jenkins worker * fix path --- .../jenkins-worker/jenkins-worker-deploy.yaml | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml index bb0775df2..aea836a4f 100644 --- a/kube/services/jenkins-worker/jenkins-worker-deploy.yaml +++ b/kube/services/jenkins-worker/jenkins-worker-deploy.yaml @@ -38,7 +38,8 @@ spec: - args: - -c - | - # fix permissions for /var/run/docker.sock + # fix permissions for docker and containerd + chmod 666 /var/run/containerd/containerd.sock chmod 666 /var/run/docker.sock echo "done" command: @@ -53,6 +54,8 @@ spec: terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: + - mountPath: /var/run/containerd/containerd.sock + name: containerdsock - mountPath: /var/run/docker.sock name: dockersock containers: @@ -121,7 +124,7 @@ spec: fieldPath: status.hostIP resources: limits: - cpu: 0.6 + cpu: "0.6" memory: 2048Mi imagePullPolicy: Always volumeMounts: @@ -139,7 +142,8 @@ spec: subPath: "ca.pem" - name: dockersock mountPath: "/var/run/docker.sock" - imagePullPolicy: Always + - name: containerdsock + mountPath: 
"/var/run/containerd/containerd.sock" volumes: - name: cert-volume secret: @@ -150,3 +154,6 @@ spec: - name: dockersock hostPath: path: /var/run/docker.sock + - name: containerdsock + hostPath: + path: "/var/run/containerd/containerd.sock" From b634af8621fcb75e76c75563e4e7ac95040eb15d Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 15 May 2023 13:25:21 -0500 Subject: [PATCH 137/362] fix(ctr-ecr): Added all platforms flag to ctr pull to fix manifest pushing issues (#2249) Co-authored-by: Edward Malinowski --- gen3/bin/ecr.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 8f4aeccb0..930202a87 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -132,7 +132,7 @@ gen3_ecr_copy_image() { ) || return 1 docker image rm "$srcTag" "$destTag" elif [[ -S /var/run/containerd/containerd.sock ]]; then - (ctr image pull "$srcTag" $loginCommand && \ + (ctr image pull "$srcTag" --all-platforms $loginCommand && \ ctr image tag "$srcTag" "$destTag" && \ ctr image push "$destTag" $loginCommand ) || return 1 From 3d4586d56462fd2896b85b350a6d7a2235e6bdc4 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 15 May 2023 16:25:51 -0500 Subject: [PATCH 138/362] chore(kubecost-prometheus): Lowered prometheus requests (#2250) Co-authored-by: Edward Malinowski --- kube/services/kubecost/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/kubecost/values.yaml b/kube/services/kubecost/values.yaml index d815f6aca..d1ac47246 100644 --- a/kube/services/kubecost/values.yaml +++ b/kube/services/kubecost/values.yaml @@ -97,7 +97,7 @@ prometheus: tag: v2.35.0 resources: requests: - memory: 30Gi + memory: 3Gi # requests: # cpu: 500m # memory: 30Gi From c3dc83ff10b8f738133613929cd5a2b702e0b687 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Tue, 16 May 2023 06:52:01 -0700 Subject: [PATCH 139/362] Set AWS default region in Thor deployment (#2251) --- kube/services/thor/thor-deploy.yaml | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/kube/services/thor/thor-deploy.yaml b/kube/services/thor/thor-deploy.yaml index 419dd561e..46fc529af 100644 --- a/kube/services/thor/thor-deploy.yaml +++ b/kube/services/thor/thor-deploy.yaml @@ -52,6 +52,8 @@ spec: - name: thor GEN3_THOR_IMAGE env: + - name: AWS_DEFAULT_REGION + value: us-east-1 - name: RUNNING_IN_QAPLANETV1 value: "true" - name: JENKINS_USERNAME From f6afc23a7b27258bd230732c3d18942dcec4a474 Mon Sep 17 00:00:00 2001 From: Atharva Rane <41084525+atharvar28@users.noreply.github.com> Date: Thu, 18 May 2023 16:49:23 -0400 Subject: [PATCH 140/362] update jenkins dockers to python 3.9 (#2252) * update jenkins dockers to python 3.9 * change only Jenkins-Worker for now * remove 3.9 install scripts as well --------- Co-authored-by: Hara Prasad Juvvala --- Docker/jenkins/Jenkins-Worker/Dockerfile | 20 +++++++++---------- .../Jenkins-Worker/install-python3.8.sh | 8 -------- .../Jenkins-Worker/install-python3.9.sh | 8 ++++++++ 3 files changed, 18 insertions(+), 18 deletions(-) delete mode 100755 Docker/jenkins/Jenkins-Worker/install-python3.8.sh create mode 100755 Docker/jenkins/Jenkins-Worker/install-python3.9.sh diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index 0ad941def..7b1d460cc 100644 --- a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -111,25 +111,25 @@ RUN DISTRO="$(lsb_release -c -s)" \ && rm -rf /var/lib/apt/lists/* # Copy sh script responsible for installing Python -COPY install-python3.8.sh /root/tmp/install-python3.8.sh +COPY install-python3.9.sh /root/tmp/install-python3.9.sh -# Run the script responsible for installing Python 3.8.0 and link it to /usr/bin/python -RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ - bash /root/tmp/install-python3.8.sh && \ - rm -rf /root/tmp/install-python3.8.sh && \ +# Run the script responsible for installing Python 3.9.0 and link it to /usr/bin/python +RUN chmod +x 
/root/tmp/install-python3.9.sh; sync && \ + bash /root/tmp/install-python3.9.sh && \ + rm -rf /root/tmp/install-python3.9.sh && \ unlink /usr/bin/python3 && \ - ln -s /usr/local/bin/python3.8 /usr/bin/python3 + ln -s /usr/local/bin/python3.9 /usr/bin/python3 RUN env RUN which python -RUN which python3.8 +RUN which python3.9 # Fix shebang for lsb_release -RUN sed -i 's/python3/python3.7/' /usr/bin/lsb_release && \ - sed -i 's/python3/python3.7/' /usr/bin/add-apt-repository +RUN sed -i 's/python3/python3.9/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.9/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. -RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade +RUN set -xe && python3.9 -m pip install --upgrade pip && python3.9 -m pip install awscli --upgrade && python3.9 -m pip install pytest --upgrade && python3.9 -m pip install poetry && python3.9 -m pip install PyYAML --upgrade && python3.9 -m pip install lxml --upgrade && python3.9 -m pip install yq --upgrade # update /etc/sudoers RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ diff --git a/Docker/jenkins/Jenkins-Worker/install-python3.8.sh b/Docker/jenkins/Jenkins-Worker/install-python3.8.sh deleted file mode 100755 index a01d59420..000000000 --- a/Docker/jenkins/Jenkins-Worker/install-python3.8.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -wget https://www.python.org/ftp/python/3.8.0/Python-3.8.0.tar.xz -tar xf Python-3.8.0.tar.xz -rm Python-3.8.0.tar.xz -cd Python-3.8.0 -./configure -make -make altinstall diff --git a/Docker/jenkins/Jenkins-Worker/install-python3.9.sh b/Docker/jenkins/Jenkins-Worker/install-python3.9.sh new file mode 100755 index 000000000..30ee05993 --- /dev/null +++ 
b/Docker/jenkins/Jenkins-Worker/install-python3.9.sh @@ -0,0 +1,8 @@ +#!/bin/bash +wget https://www.python.org/ftp/python/3.9.0/Python-3.9.0.tar.xz +tar xf Python-3.9.0.tar.xz +rm Python-3.9.0.tar.xz +cd Python-3.9.0 +./configure +make +make altinstall From 704578789d4d05899017af9717aa182e88803557 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Tue, 23 May 2023 11:41:53 -0500 Subject: [PATCH 141/362] Update kube-setup-fence.sh (#2256) check if ENABLE_VISA_UPDATE_CRON is null --- gen3/bin/kube-setup-fence.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index a521f3078..03edabbf4 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -88,7 +88,7 @@ if isServiceVersionGreaterOrEqual "fence" "6.0.0" "2022.07"; then ENABLE_VISA_UPDATE_CRON=$(kubectl get cm manifest-fence -o=jsonpath='{.data.fence-config-public\.yaml}' | yq -r .ENABLE_VISA_UPDATE_CRON) # Delete the fence-visa-update cronjob if ENABLE_VISA_UPDATE_CRON is set to false or not set or null in the configmap manifest-fence - if [[ "$ENABLE_VISA_UPDATE_CRON" == "false" ]] || [[ -z "$ENABLE_VISA_UPDATE_CRON" ]]; then + if [[ "$ENABLE_VISA_UPDATE_CRON" == "false" ]] || [[ "$ENABLE_VISA_UPDATE_CRON" == "null" ]] || [[ -z "$ENABLE_VISA_UPDATE_CRON" ]]; then echo "Deleting fence-visa-update cronjob" kubectl delete cronjob fence-visa-update elif [[ "$ENABLE_VISA_UPDATE_CRON" == "true" ]]; then From b004d84ede923d07acf37b562ec05df6f18426df Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 26 May 2023 11:25:48 -0500 Subject: [PATCH 142/362] fix(kubecost-netpolicy): Updated networkpolicy to work with kubecost (#2258) Co-authored-by: Edward Malinowski --- .../services/netpolicy/gen3/services/revproxy_netpolicy.yaml | 5 +++++ kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git 
a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml index 2f1462385..2afecf049 100644 --- a/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/revproxy_netpolicy.yaml @@ -29,6 +29,7 @@ spec: - port: 82 - port: 443 - port: 8088 + - port: 9090 egress: - to: - namespaceSelector: @@ -46,6 +47,10 @@ spec: - namespaceSelector: matchLabels: app: argocd + - to: + - namespaceSelector: + matchLabels: + name: kubecost policyTypes: - Ingress - Egress diff --git a/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf b/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf index a620d836d..20d57d595 100644 --- a/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/kubecost-service.conf @@ -18,7 +18,7 @@ # if not using the jupyterhub service # this isn't dev namespace friendly, must be manually updated set $proxy_service "kubecost"; - set $upstream http://kubecost-cost-analyzer.kubecost.svc.cluster.local:443; + set $upstream http://kubecost-cost-analyzer.kubecost.svc.cluster.local:9090; rewrite ^/kubecost/(.*) /$1 break; proxy_pass $upstream; proxy_set_header Authorization "$access_token"; From d8a8b787e7b54af5fc32932f7db93c125fbc7527 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 26 May 2023 11:26:20 -0500 Subject: [PATCH 143/362] fix(gitops-clusterrole): Added cluster role to gitops sync to let it view namespaces/services (#2257) * fix(gitops-clusterrole): Added cluster role to gitops sync to let it view namespaces/services * fix(gitops-clusterrole): Added cluster role to gitops sync to let it view namespaces/services --------- Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-roles.sh | 4 +-- kube/services/jenkins/rolebinding-devops.yaml | 29 +++++++++++++++++-- 2 files changed, 28 insertions(+), 5 deletions(-) diff --git a/gen3/bin/kube-setup-roles.sh 
b/gen3/bin/kube-setup-roles.sh index c7d484679..aba7bf402 100644 --- a/gen3/bin/kube-setup-roles.sh +++ b/gen3/bin/kube-setup-roles.sh @@ -31,10 +31,10 @@ if [[ -z "$JENKINS_HOME" ]]; then roleName="$(gen3 api safe-name gitops)" gen3 awsrole create "$roleName" gitops-sa # do this here, since we added the new role to this binding - g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "namespace: $namespace"|g3kubectl apply -f - + g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "$namespace"|g3kubectl apply -f - fi if ! g3kubectl get rolebindings/devops-binding > /dev/null 2>&1; then - g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "namespace: $namespace"|g3kubectl apply -f - + g3k_kv_filter ${GEN3_HOME}/kube/services/jenkins/rolebinding-devops.yaml CURRENT_NAMESPACE "$namespace"|g3kubectl apply -f - fi ctx="$(g3kubectl config current-context)" diff --git a/kube/services/jenkins/rolebinding-devops.yaml b/kube/services/jenkins/rolebinding-devops.yaml index 579da9863..dd99bdd86 100644 --- a/kube/services/jenkins/rolebinding-devops.yaml +++ b/kube/services/jenkins/rolebinding-devops.yaml @@ -15,14 +15,37 @@ roleRef: kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: - name: argo-role-binding + name: argo-role-binding-CURRENT_NAMESPACE namespace: argo subjects: - kind: ServiceAccount name: gitops-sa - CURRENT_NAMESPACE + namespace: CURRENT_NAMESPACE apiGroup: "" roleRef: kind: ClusterRole name: admin - apiGroup: "" \ No newline at end of file + apiGroup: "" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: gitops-cluster-role-CURRENT_NAMESPACE +rules: +- apiGroups: [""] + resources: ["namespaces","services"] + verbs: ["get", "list"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: gitops-cluster-binding-CURRENT_NAMESPACE +subjects: +- kind: ServiceAccount 
+ name: gitops-sa + namespace: CURRENT_NAMESPACE + apiGroup: "" +roleRef: + kind: ClusterRole + name: gitops-cluster-role-CURRENT_NAMESPACE + apiGroup: rbac.authorization.k8s.io From 75ddb2d8f4fda10d3a8e660f97a3d1282d62c529 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 26 May 2023 11:27:20 -0500 Subject: [PATCH 144/362] feat(manifestservice-sa): Updated manifestservice to use EKS IAM SA (#2255) * feat(manifestservice-sa): Updated manifestservice to use EKS IAM SA * GPE-950 * GPE-950 --------- Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-manifestservice.sh | 17 ++++++----------- .../manifestservice/manifestservice-deploy.yaml | 1 + 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/gen3/bin/kube-setup-manifestservice.sh b/gen3/bin/kube-setup-manifestservice.sh index a3df01d42..ccbde4691 100644 --- a/gen3/bin/kube-setup-manifestservice.sh +++ b/gen3/bin/kube-setup-manifestservice.sh @@ -12,28 +12,23 @@ gen3_load "gen3/gen3setup" hostname="$(gen3 api hostname)" bucketname="manifest-${hostname//./-}" -username="manifest-bot-${hostname//./-}" +username="manifestbot-${hostname//./-}" mkdir -p $(gen3_secrets_folder)/g3auto/manifestservice credsFile="$(gen3_secrets_folder)/g3auto/manifestservice/config.json" +gen3_log_info "kube-setup-manifestservice" "setting up manifest-service resources" +gen3 s3 create "$bucketname" || true +gen3 awsrole create ${username} manifestservice-sa || true +gen3 s3 attach-bucket-policy "$bucketname" --read-write --role-name ${username} || true if (! (g3kubectl describe secret manifestservice-g3auto 2> /dev/null | grep config.js > /dev/null 2>&1)) \ - && [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]]; + && [[ (! 
-f "$credsFile") && -z "$JENKINS_HOME" ]]; then - gen3_log_info "kube-setup-manifestservice" "setting up manifest-service resources" - gen3 s3 create "$bucketname" - gen3 awsuser create ${username} - gen3 s3 attach-bucket-policy "$bucketname" --read-write --user-name ${username} gen3_log_info "initializing manifestservice config.json" - user=$(gen3 secrets decode ${username}-g3auto awsusercreds.json) - key_id=$(jq -r .id <<< $user) - access_key=$(jq -r .secret <<< $user) cat - > "$credsFile" < Date: Tue, 30 May 2023 23:10:59 +0530 Subject: [PATCH 145/362] Update logo.svg (#2261) --- apis_configs/logo.svg | 99 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 1 deletion(-) diff --git a/apis_configs/logo.svg b/apis_configs/logo.svg index 7f056e548..da71f111e 100644 --- a/apis_configs/logo.svg +++ b/apis_configs/logo.svg @@ -1 +1,98 @@ -fresh \ No newline at end of file + + + + + Group 33 + Created with Sketch. + + + + + + + + + + + + + + + + + + + Group 33 + + + + From 5f87e82257f45acf82e3e3c9cbf8b69a8ae0fb04 Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Tue, 30 May 2023 15:09:52 -0500 Subject: [PATCH 146/362] HP-1117 Feat/cedar ingest update (#2232) * updating cedar ingestion script --- .../healdata/heal-cedar-data-ingest.py | 121 +++++++++++------- 1 file changed, 75 insertions(+), 46 deletions(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 79f5c7eb8..1235c6f58 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -79,53 +79,82 @@ def update_filter_metadata(metadata_to_update): token_header = {"Authorization": 'bearer ' + access_token} -# Get the metadata from cedar to register -print("Querying CEDAR...") -cedar = requests.get(f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}", headers=token_header) - -# If we get metadata back now register with MDS -if cedar.status_code == 
200: - metadata_return = cedar.json() - if "metadata" not in metadata_return: - print("Got 200 from CEDAR wrapper but no metadata in body, something is not right!") - sys.exit(1) - - print(f"Successfully got {len(metadata_return['metadata'])} record(s) from CEDAR directory") - for cedar_record in metadata_return["metadata"]: - if "appl_id" not in cedar_record: - print("This record doesn't have appl_id, skipping...") - continue - cedar_record_id = str(cedar_record["appl_id"]) - - # Get the metadata record for the nih_application_id - mds = requests.get(f"http://revproxy-service/mds/metadata/{cedar_record_id}", - headers=token_header - ) - if mds.status_code == 200: - mds_res = mds.json() - mds_cedar_register_data_body = {} - mds_discovery_data_body = {} - if mds_res["_guid_type"] == "discovery_metadata": - print("Metadata is already registered. Updating MDS record") - elif mds_res["_guid_type"] == "unregistered_discovery_metadata": - print("Metadata is has not been registered. Registering it in MDS record") +limit = 10 +offset = 0 + +# initalize this to be bigger than our inital call so we can go through while loop +total = 100 + +while((limit + offset <= total)): + # Get the metadata from cedar to register + print("Querying CEDAR...") + cedar = requests.get(f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", headers=token_header) + + # If we get metadata back now register with MDS + if cedar.status_code == 200: + metadata_return = cedar.json() + if "metadata" not in metadata_return: + print("Got 200 from CEDAR wrapper but no metadata in body, something is not right!") + sys.exit(1) + + total = metadata_return["metadata"]["totalCount"] + returned_records = len(metadata_return["metadata"]["records"]) + print(f"Successfully got {returned_records} record(s) from CEDAR directory") + for cedar_record in metadata_return["metadata"]["records"]: + if "appl_id" not in cedar_record: + print("This record doesn't have appl_id, 
skipping...") continue - pydash.merge(mds_discovery_data_body, mds_res["gen3_discovery"], cedar_record) - mds_discovery_data_body = update_filter_metadata(mds_discovery_data_body) - mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body - mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" - - print("Metadata is now being registered.") - mds_put = requests.put(f"http://revproxy-service/mds/metadata/{cedar_record_id}", - headers=token_header, - json = mds_cedar_register_data_body - ) - if mds_put.status_code == 200: - print(f"Successfully registered: {cedar_record_id}") + + # get the appl id from cedar for querying in our MDS + cedar_appl_id = str(cedar_record["appl_id"]) + + # Get the metadata record for the nih_application_id + mds = requests.get(f"http://revproxy-service/mds/metadata?gen3_discovery.appl_id={cedar_appl_id}&data=true") + if mds.status_code == 200: + mds_res = mds.json() + + # the query result key is the record of the metadata. If it doesn't return anything then our query failed. + if len(list(mds_res.keys())) == 0 or len(list(mds_res.keys())) > 1: + print("Query returned nothing for ", cedar_appl_id, "appl id") + continue + + # get the key for our mds record + cedar_record_id = list(mds_res.keys())[0] + + mds_res = mds_res[cedar_record_id] + mds_cedar_register_data_body = {} + mds_discovery_data_body = {} + if mds_res["_guid_type"] == "discovery_metadata": + print("Metadata is already registered. Updating MDS record") + elif mds_res["_guid_type"] == "unregistered_discovery_metadata": + print("Metadata is has not been registered. 
Registering it in MDS record") + continue + + pydash.merge(mds_discovery_data_body, mds_res["gen3_discovery"], cedar_record) + mds_discovery_data_body = update_filter_metadata(mds_discovery_data_body) + mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body + mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" + + print("Metadata is now being registered.") + mds_put = requests.put(f"http://revproxy-service/mds/metadata/{cedar_record_id}", + headers=token_header, + json = mds_cedar_register_data_body + ) + if mds_put.status_code == 200: + print(f"Successfully registered: {cedar_record_id}") + else: + print(f"Failed to register: {cedar_record_id}. Might not be MDS admin") + print(f"Status from MDS: {mds_put.status_code}") else: - print(f"Failed to register: {cedar_record_id}. Might not be MDS admin") - print(f"Status from MDS: {mds_put.status_code}") - else: - print(f"Failed to get information from MDS: {mds.status_code}") + print(f"Failed to get information from MDS: {mds.status_code}") + + if offset + limit == total: + break + + offset = offset + limit + if (offset + limit) > total: + limit = (offset + limit) - total + + else: print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}") From 7d3d347de1ca181f43bb8510da2f5830bfe25ddc Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Wed, 31 May 2023 12:54:06 -0700 Subject: [PATCH 147/362] (HP-1106): read license from kubernetes secret (#2254) * (HP-1106): read license from kubernetes secret * Add description of secret format, use env for secret key --- .../jobs/distribute-licenses-job.yaml | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml index 8c276f194..e17cf108c 100644 --- a/kube/services/jobs/distribute-licenses-job.yaml +++ b/kube/services/jobs/distribute-licenses-job.yaml @@ 
-48,6 +48,10 @@ spec: configMapKeyRef: name: manifest-hatchery key: "user-namespace" + - name: GEN3_LICENSE_SECRET_NAME + value: gen3-license-secret + - name: GEN3_LICENSE_KEY + value: licenseSecrets command: ["python"] args: - "-c" @@ -96,15 +100,23 @@ spec: used_licenses.sort() print(f"Licenses currently in use: {used_licenses}") - # This is a free trial license for demo purposes only - # Todo: store, mount licenses secret - license_file = """ - 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100! - 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100! - 501709301583!$n1d p$53 zvqe 2sfz jzft 7aei e8yL 8ue$ j38b!snic!first line!second line!2100! - """.strip() + # The license keys should be stored in a kubernetes secret. + # The format of the secret is one license string per line. + # The license strings are generated with 'stinit' using the information in a license PDF. + # The secret can be generated from a temporary file with a kubectl command, eg + # kubectl create secret generic GEN3_LICENSE_SECRET_NAME --from-file=GEN3_LICENSE_KEY=/path/to/file.lic - licenses = license_file.split("\n") + # Get license from kubernetes secret + print("Ready to read secret") + secret_name = os.environ['GEN3_LICENSE_SECRET_NAME'] + secret_key = os.environ['GEN3_LICENSE_KEY'] + license_secrets = os.popen( + f"kubectl get secret {secret_name} --template={{{{.data.{secret_key}}}}} | base64 -d" + ).read() + license_secrets = license_secrets.strip() + + licenses = license_secrets.split("\n") + print(f"Number of licenses = {len(licenses)}") available_license_ids = [ license_id for license_id, license in enumerate(licenses) if license_id not in used_licenses From 6989fdff94ae2c5f3e6b6f363d6ef68c31ee6c21 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 1 Jun 2023 13:05:53 -0500 Subject: [PATCH 148/362] fix(karpenter): Added a couple karpenter fixes for workflows (#2263) Co-authored-by: Edward Malinowski --- 
gen3/bin/kube-setup-karpenter.sh | 2 +- kube/services/karpenter/provisionerWorkflow.yaml | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 3118586cb..0a81d1789 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -106,7 +106,7 @@ gen3_deploy_karpenter() { aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${security_groups} || true aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}" --resources ${subnets} || true aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-jupyter" --resources ${security_groups_jupyter} || true - aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-worfklow" --resources ${security_groups_workflow} || true + aws ec2 create-tags --tags "Key=karpenter.sh/discovery,Value=${vpc_name}-workflow" --resources ${security_groups_workflow} || true echo '{ "Version": "2012-10-17", "Statement": [ diff --git a/kube/services/karpenter/provisionerWorkflow.yaml b/kube/services/karpenter/provisionerWorkflow.yaml index a66a14707..f43dbf648 100644 --- a/kube/services/karpenter/provisionerWorkflow.yaml +++ b/kube/services/karpenter/provisionerWorkflow.yaml @@ -21,7 +21,9 @@ spec: taints: - key: role value: workflow - effect: NoSchedule + effect: NoSchedule + labels: + role: workflow limits: resources: cpu: 1000 From dc5e5a254cc7b9fbafd14c0774ed8d735acdb2be Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 1 Jun 2023 14:06:33 -0400 Subject: [PATCH 149/362] Updated how we configure the interruption queue for Karpenter, using the Helm values rather than a pre-created ConfigMap. 
(#2264) --- gen3/bin/kube-setup-karpenter.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 0a81d1789..72ffbfc31 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -28,6 +28,7 @@ gen3_deploy_karpenter() { else karpenter=${karpenter:-v0.22.0} fi + local queue_name="karpenter-sqs-${vpc_name}" echo '{ "Statement": [ { @@ -134,6 +135,7 @@ gen3_deploy_karpenter() { --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ --set settings.aws.clusterEndpoint="${cluster_endpoint}" \ --set settings.aws.clusterName=${vpc_name} \ + --set settings.aws.interruptionQueueName="${queue_name}" \ --set serviceAccount.name=karpenter \ --set serviceAccount.create=false \ --set controller.env[0].name=AWS_REGION \ @@ -201,7 +203,7 @@ gen3_create_karpenter_sqs_eventbridge() { aws events put-targets --rule "Karpenter-${vpc_name}-ScheduledChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true aws events put-targets --rule "Karpenter-${vpc_name}-InstanceStateChangeRule" --targets "Id"="1","Arn"="${queue_arn}" 2> /dev/null || true aws sqs set-queue-attributes --queue-url "${queue_url}" --attributes "Policy"="$(aws sqs get-queue-attributes --queue-url "${queue_url}" --attribute-names "Policy" --query "Attributes.Policy" --output text | jq -r '.Statement += [{"Sid": "AllowKarpenter", "Effect": "Allow", "Principal": {"Service": ["sqs.amazonaws.com","events.amazonaws.com"]}, "Action": "sqs:SendMessage", "Resource": "'${queue_arn}'"}]')" 2> /dev/null || true - g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/karpenter-global-settings.yaml SQS_NAME ${queue_name} | g3kubectl apply -f - + #g3k_kv_filter ${GEN3_HOME}/kube/services/karpenter/karpenter-global-settings.yaml SQS_NAME ${queue_name} | g3kubectl apply -f - } gen3_remove_karpenter() { From 6ee8ddc4a90423e04cb61fc2b28052b5775270e0 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 5 Jun 2023 14:42:27 -0500 Subject: [PATCH 150/362] Add a grace period for hatchery-reaper (#2259) --- gen3/bin/jupyter.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/gen3/bin/jupyter.sh b/gen3/bin/jupyter.sh index b2b74e043..169ec59dc 100644 --- a/gen3/bin/jupyter.sh +++ b/gen3/bin/jupyter.sh @@ -241,8 +241,15 @@ gen3_jupyter_idle_pods() { if jq -r --arg cluster "$clusterName" 'select(.cluster | startswith($cluster))' < "$tempClusterFile" | grep "$clusterName" > /dev/null; then echo "$name" if [[ "$command" == "kill" ]]; then - gen3_log_info "try to kill pod $name in $jnamespace" - g3kubectl delete pod --namespace "$jnamespace" "$name" 1>&2 + pod_creation=$(date -d $(g3kubectl get pod "$name" -n "$jnamespace" -o jsonpath='{.metadata.creationTimestamp}') +%s) + current_time=$(date +%s) + age=$((current_time - pod_creation)) + + # potential workspaces to be reaped for inactivity must be at least 60 minutes old + if ((age >= 3600)); then + gen3_log_info "try to kill pod $name in $jnamespace" + g3kubectl delete pod --namespace "$jnamespace" "$name" 1>&2 + fi fi else gen3_log_info "$clusterName not in $(cat $tempClusterFile)" From 10ada9fc4cd9095ebba118799077641a1fad5d07 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 6 Jun 2023 13:55:27 -0500 Subject: [PATCH 151/362] feat(datadog-prometheus): Updated datadog to not pull data from prometheus (#2267) Co-authored-by: Edward Malinowski --- .secrets.baseline | 4 ++-- kube/services/datadog/values.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 9890c38e6..621c0a009 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-02-09T21:25:40Z", + "generated_at": "2023-06-06T18:46:35Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -736,7 +736,7 @@ "hashed_secret": 
"52330dffa4d0795b4199a66428e54eca228e1661", "is_secret": false, "is_verified": false, - "line_number": 20, + "line_number": 23, "type": "Secret Keyword" } ], diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 6c7df39b4..3762ef373 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -203,9 +203,9 @@ datadog: ## ref: https://docs.datadoghq.com/agent/kubernetes/prometheus/ prometheusScrape: # datadog.prometheusScrape.enabled -- Enable autodiscovering pods and services exposing prometheus metrics. - enabled: true + enabled: false # datadog.prometheusScrape.serviceEndpoints -- Enable generating dedicated checks for service endpoints. - serviceEndpoints: true + serviceEndpoints: false # datadog.prometheusScrape.additionalConfigs -- Allows adding advanced openmetrics check configurations with custom discovery rules. (Requires Agent version 7.27+) additionalConfigs: [] # - From e343b467ba9abbbe7adefc0300fa4e3b7101b8f1 Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Thu, 8 Jun 2023 07:52:59 -0700 Subject: [PATCH 152/362] Update stata license secret name. (#2266) --- kube/services/jobs/distribute-licenses-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml index e17cf108c..5e5209694 100644 --- a/kube/services/jobs/distribute-licenses-job.yaml +++ b/kube/services/jobs/distribute-licenses-job.yaml @@ -49,7 +49,7 @@ spec: name: manifest-hatchery key: "user-namespace" - name: GEN3_LICENSE_SECRET_NAME - value: gen3-license-secret + value: stata-workspace-gen3-license - name: GEN3_LICENSE_KEY value: licenseSecrets command: ["python"] From 06b00a39e9467bc6e47b879be00585f8f0969ad1 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Fri, 9 Jun 2023 12:19:47 -0500 Subject: [PATCH 153/362] Update cohort-middleware-deploy and argo-wrapper to have optional configs. (#2158) * Update cohort-middleware-deploy.yaml * Update argo-wrapper-deploy.yaml Add argo-config to be optional so pods will be up for jenkins tests. --------- Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- kube/services/argo-wrapper/argo-wrapper-deploy.yaml | 1 + kube/services/cohort-middleware/cohort-middleware-deploy.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index 67acf0ca7..65f68d98a 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -57,6 +57,7 @@ spec: - name: argo-config configMap: name: manifest-argo + optional: true containers: - name: argo-wrapper diff --git a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml index 602924d26..9e8e92980 100644 --- a/kube/services/cohort-middleware/cohort-middleware-deploy.yaml +++ b/kube/services/cohort-middleware/cohort-middleware-deploy.yaml @@ -60,6 +60,7 @@ spec: - name: cohort-middleware-config secret: secretName: cohort-middleware-config + optional: true containers: - name: cohort-middleware GEN3_COHORT-MIDDLEWARE_IMAGE|-image: quay.io/cdis/cohort-middleware:latest-| From 69fd34310af7e669cbd04e05a81548eafde555eb Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Tue, 13 Jun 2023 10:56:30 -0400 Subject: [PATCH 154/362] Updating argo configs to enable offloading workflows to postgres --- gen3/bin/kube-setup-argo.sh | 2 +- kube/services/argo/values.yaml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index 
beba520aa..c7243d3da 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -243,7 +243,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.22.11 + helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.29.1 else gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy" fi diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 7c9ee3270..2f8d0ec6f 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -50,6 +50,7 @@ controller: passwordSecret: name: argo-db-creds key: db_password + nodeStatusOffload: true workflowDefaults: spec: From dd95646b0602c9106dbc3497d5ecf2c7894beb0b Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 14 Jun 2023 09:02:53 -0400 Subject: [PATCH 155/362] Update values.yaml (#2270) --- kube/services/argo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 2f8d0ec6f..e8db62711 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -50,7 +50,7 @@ controller: passwordSecret: name: argo-db-creds key: db_password - nodeStatusOffload: true + nodeStatusOffLoad: true workflowDefaults: spec: From 375af8b51fcfd8ae7438ab73783b5254900c0aa3 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 15 Jun 2023 10:21:02 -0500 Subject: [PATCH 156/362] fix(manifestservice-netpolicy): added netpolicy to allow manifestservice to work (#2271) Co-authored-by: Edward Malinowski --- kube/services/manifestservice/manifestservice-deploy.yaml | 1 + 1 file changed, 1 insertion(+) diff --git 
a/kube/services/manifestservice/manifestservice-deploy.yaml b/kube/services/manifestservice/manifestservice-deploy.yaml index 3f67a272e..0966f2480 100644 --- a/kube/services/manifestservice/manifestservice-deploy.yaml +++ b/kube/services/manifestservice/manifestservice-deploy.yaml @@ -22,6 +22,7 @@ spec: s3: "yes" public: "yes" userhelper: "yes" + netvpc: "yes" GEN3_DATE_LABEL spec: serviceAccountName: manifestservice-sa From 772330231378c8310d12798c7689258d9da698ec Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 15 Jun 2023 11:19:44 -0500 Subject: [PATCH 157/362] fix(karpenter-sqs): Remove karpenters ability to create sqs queues to mitigate audit data loss (#2272) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-karpenter.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 72ffbfc31..6a8b21949 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -17,7 +17,8 @@ gen3_deploy_karpenter() { gen3_log_info "Ensuring that the spot instance service linked role is setup" # Ensure the spot instance service linked role is setup # It is required for running spot instances - gen3_create_karpenter_sqs_eventbridge + #### Uncomment this when we fix the sqs helper to allow for usage by more than one service + #gen3_create_karpenter_sqs_eventbridge aws iam create-service-linked-role --aws-service-name spot.amazonaws.com || true if g3k_config_lookup .global.karpenter_version; then karpenter=$(g3k_config_lookup .global.karpenter_version) @@ -189,7 +190,7 @@ gen3_update_karpenter_configs() { gen3_create_karpenter_sqs_eventbridge() { local queue_name="karpenter-sqs-${vpc_name}" local eventbridge_rule_name="karpenter-eventbridge-${vpc_name}" - gen3 sqs create-queue-if-not-exist $queue_name >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" + #gen3 sqs create-queue-if-not-exist $queue_name >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" local queue_url=$(cat 
"$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.url') local queue_arn=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.arn') # Create eventbridge rules From 2c79e0d6bc2ce718e921d26f320e9b96ec463670 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Tue, 20 Jun 2023 13:14:58 -0500 Subject: [PATCH 158/362] fix: don't run unnecessary cm sync if rolling all (#2274) --- gen3/bin/kube-setup-metadata.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-metadata.sh b/gen3/bin/kube-setup-metadata.sh index 04913235c..1cd148dc2 100644 --- a/gen3/bin/kube-setup-metadata.sh +++ b/gen3/bin/kube-setup-metadata.sh @@ -76,7 +76,7 @@ fi # Sync the manifest config from manifest.json (or manifests/metadata.json) to the k8s config map. # This may not actually create the manifest-metadata config map if the user did not specify any metadata # keys in their manifest configuration. -gen3 gitops configmaps +[[ -z "$GEN3_ROLL_ALL" ]] && gen3 gitops configmaps # Check the manifest-metadata configmap to see if the aggregate mds feature is enabled. Skip aws-es-proxysetup if configmap doesn't exist. 
if g3kubectl get configmap manifest-metadata > /dev/null 2>&1; then From d2e308a12bb7afa8ec7c5ee12ca3d84d8b01a265 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 21 Jun 2023 14:55:00 -0500 Subject: [PATCH 159/362] fix(pelican-log-ingestion): Remove pelican export log ingestion (#2277) Co-authored-by: Edward Malinowski --- kube/services/datadog/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 3762ef373..6db8d7ad9 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -221,7 +221,7 @@ datadog: # - send_distribution_buckets: true # timeout: 5 - containerExcludeLogs: "kube_namespace:logging kube_namespace:argo" + containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export*" ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements From 20122134dcd9a504dbf30cf5785fc4f026fa0f98 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 21 Jun 2023 15:48:23 -0500 Subject: [PATCH 160/362] fix(pelican-log-ingestion): Remove pelican export log ingestion (#2278) Co-authored-by: Edward Malinowski --- kube/services/datadog/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index 6db8d7ad9..c613bd079 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -221,7 +221,7 @@ datadog: # - send_distribution_buckets: true # timeout: 5 - containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export*" + containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export* name:job-task" ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements From 
f0059e31e8d5adf415372528124365b373a8c8bc Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Fri, 7 Jul 2023 13:03:52 -0500 Subject: [PATCH 161/362] Apache Guacamole (#2279) * feat: initial YAMLs for Kubernetes for Apache Guacamole * fix: secret matching * fix: remove envFrom * fix: different path for apache guacamole * fix: revproxy configuration --- gen3/bin/kube-setup-apache-guacamole.sh | 121 ++++++++++++++++ .../apache-guacamole-configmap.yaml | 18 +++ .../apache-guacamole-deploy.yaml | 131 ++++++++++++++++++ .../apache-guacamole-secret.yaml | 14 ++ .../apache-guacamole-service.yaml | 26 ++++ .../gen3.nginx.conf/guacamole-service.conf | 20 +++ 6 files changed, 330 insertions(+) create mode 100644 gen3/bin/kube-setup-apache-guacamole.sh create mode 100644 kube/services/apache-guacamole/apache-guacamole-configmap.yaml create mode 100644 kube/services/apache-guacamole/apache-guacamole-deploy.yaml create mode 100644 kube/services/apache-guacamole/apache-guacamole-secret.yaml create mode 100644 kube/services/apache-guacamole/apache-guacamole-service.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/guacamole-service.conf diff --git a/gen3/bin/kube-setup-apache-guacamole.sh b/gen3/bin/kube-setup-apache-guacamole.sh new file mode 100644 index 000000000..31193c526 --- /dev/null +++ b/gen3/bin/kube-setup-apache-guacamole.sh @@ -0,0 +1,121 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +export hostname=$(gen3 api hostname) +export namespace=$(gen3 api namespace) + +# lib --------------------- + +new_client() { + gen3_log_info "kube-setup-apache-guacamole" "creating fence oidc client for Apache Guacamole" + local fence_client="guacamole" + local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client $fence_client --urls https://${hostname}/guac/guacamole/#/ --username guacamole --auto-approve --public --external --allowed-scopes openid profile email user | tail -1) + 
# secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') + if [[ ! $secrets =~ (\'(.*)\', None) ]]; then + # try delete client + g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client $fence_client > /dev/null 2>&1 + secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client $fence_client --urls https://${hostname}/guac/guacamole/#/ --username guacamole --auto-approve --public --external --allowed-scopes openid profile email user | tail -1) + if [[ ! $secrets =~ (\'(.*)\', None) ]]; then + gen3_log_err "kube-setup-apache-guacamole" "Failed generating oidc client for guacamole: $secrets" + return 1 + fi + fi + local FENCE_CLIENT_ID="${BASH_REMATCH[2]}" + local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}" + gen3_log_info "create guacamole-secret" + mkdir -m 0700 -p "$(gen3_secrets_folder)/g3auto/guacamole" + + cat - < /dev/null 2>&1; then + local credsPath="$(gen3_secrets_folder)/g3auto/guacamole/appcreds.json" + if [ -f "$credsPath" ]; then + gen3 secrets sync + return 0 + fi + mkdir -p "$(dirname "$credsPath")" + if ! new_client > "$credsPath"; then + gen3_log_err "Failed to setup guacamole fence client" + rm "$credsPath" || true + return 1 + fi + gen3 secrets sync + fi + + if ! g3kubectl describe secret guacamole-g3auto | grep dbcreds.json > /dev/null 2>&1; then + gen3_log_info "create database" + if ! gen3 db setup guacamole; then + gen3_log_err "Failed setting up database for guacamole service" + return 1 + fi + gen3 secrets sync + fi +} + +setup_secrets() { + # guacamole-secrets.yaml populate and apply. + gen3_log_info "Deploying secrets for guacamole" + # subshell + + ( + if ! dbcreds="$(gen3 db creds guacamole)"; then + gen3_log_err "unable to find db creds for guacamole service" + return 1 + fi + + if ! 
appcreds="$(gen3 secrets decode guacamole-g3auto appcreds.json)"; then + gen3_log_err "unable to find app creds for guacamole service" + return 1 + fi + + local hostname=$(gen3 api hostname) + export DB_NAME=$(jq -r ".db_database" <<< "$dbcreds") + export DB_USER=$(jq -r ".db_username" <<< "$dbcreds") + export DB_PASS=$(jq -r ".db_password" <<< "$dbcreds") + export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds") + + export FENCE_URL="https://${hostname}/user/user" + export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" + export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") + export FENCE_CLIENT_SECRET=$(jq -r ".FENCE_CLIENT_SECRET" <<< "$appcreds") + + export OPENID_AUTHORIZATION_ENDPOINT="https://${hostname}/user/oauth2/authorize" + export OPENID_JWKS_ENDPOINT="https://${hostname}/user/.well-known/jwks" + export OPENID_REDIRECT_URI="https://${hostname}/guac/guacamole/#/" + export OPENID_ISSUER="https://${hostname}/user" + export OPENID_USERNAME_CLAIM_TYPE="sub" + export OPENID_SCOPE="openid profile email" + + envsubst <"${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-configmap.yaml" | g3kubectl apply -f - + envsubst <"${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-secret.yaml" | g3kubectl apply -f - + ) +} + +# main -------------------------------------- +if [[ $# -gt 0 && "$1" == "new-client" ]]; then + new_client + exit $? 
+fi + +setup_creds + +setup_secrets + +gen3 roll apache-guacamole +g3kubectl apply -f "${GEN3_HOME}/kube/services/apache-guacamole/apache-guacamole-service.yaml" + +cat < Date: Mon, 10 Jul 2023 14:48:28 -0600 Subject: [PATCH 162/362] adding coralogix to web whitelist (#2282) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index bcc5eb155..c36194765 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -76,6 +76,7 @@ gopkg.in grafana.com http.us.debian.org ifconfig.io +ingress.coralogix.us internet2.edu k8s.gcr.io ks.osdc.io From a94ad96e693d1fcd9cfbc8c8f68b83d9280bb505 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 11 Jul 2023 14:36:43 -0600 Subject: [PATCH 163/362] =?UTF-8?q?modifying=20the=20kube-setup-aws-es-pro?= =?UTF-8?q?xy=20scipt=20to=20look=20for=20a=20second=20clus=E2=80=A6=20(#2?= =?UTF-8?q?280)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * modifying the kube-setup-aws-es-proxy scipt to look for a second cluster for the es7 upgrade as we want to preserve the old cluster while pointing to the new one. 
* forgot to remove the "2" so the old cluster will be used if the flag is not present --- gen3/bin/kube-setup-aws-es-proxy.sh | 46 +++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 12 deletions(-) diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh index d3aafcedc..f13a4d411 100644 --- a/gen3/bin/kube-setup-aws-es-proxy.sh +++ b/gen3/bin/kube-setup-aws-es-proxy.sh @@ -8,23 +8,45 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +# Deploy Datadog with argocd if flag is set in the manifest path +manifestPath=$(g3k_manifest_path) +es7="$(jq -r ".[\"global\"][\"es7\"]" < "$manifestPath" | tr '[:upper:]' '[:lower:]')" + [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then envname="$(gen3 api environment)" - if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ - && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then - gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" - g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" - gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." + + if [ "$es7" = true ]; then + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ + && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then + gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" + gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." 
+ else + # + # probably running in jenkins or job environment + # try to make sure network policy labels are up to date + # + gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up." + gen3 kube-setup-networkpolicy service aws-es-proxy + g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true + fi else - # - # probably running in jenkins or job environment - # try to make sure network policy labels are up to date - # - gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up." - gen3 kube-setup-networkpolicy service aws-es-proxy - g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true + if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata --query "DomainStatusList[*].Endpoints" --output text)" \ + && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then + gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" + gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." + else + # + # probably running in jenkins or job environment + # try to make sure network policy labels are up to date + # + gen3_log_info "kube-setup-aws-es-proxy" "Not deploying aws-es-proxy, no endpoint to hook it up." 
+ gen3 kube-setup-networkpolicy service aws-es-proxy + g3kubectl patch deployment "aws-es-proxy-deployment" -p '{"spec":{"template":{"metadata":{"labels":{"netvpc":"yes"}}}}}' || true + fi fi gen3 job cron es-garbage '@daily' else From 2bed77c34ae7a0830b8b92d9238d8f892494f306 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 13 Jul 2023 10:11:41 -0600 Subject: [PATCH 164/362] EBSCSI Driver policy (#2262) * adding EBSCSI Driver policy to the EKS Workers policy as the CSI Driver is required for clusters above 1.23 * also, adding to the eks-nodepool module * updating kubeproxy and cni for version 1.24 --- gen3/bin/kube-setup-system-services.sh | 4 ++-- tf_files/aws/modules/eks-nodepool/cloud.tf | 5 +++++ tf_files/aws/modules/eks/cloud.tf | 5 +++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index 34ae87436..609ee01c7 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -16,10 +16,10 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" -kubeproxy=${kubeproxy:-1.22.11} +kubeproxy=${kubeproxy:-1.24.7} coredns=${coredns:-1.8.7} kubednsautoscaler=${kubednsautoscaler:-1.8.6} -cni=${cni:-1.12.0} +cni=${cni:-1.12.2} calico=${calico:-1.7.8} diff --git a/tf_files/aws/modules/eks-nodepool/cloud.tf b/tf_files/aws/modules/eks-nodepool/cloud.tf index 589b9a429..1cdedd964 100644 --- a/tf_files/aws/modules/eks-nodepool/cloud.tf +++ b/tf_files/aws/modules/eks-nodepool/cloud.tf @@ -162,6 +162,11 @@ resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKS_CNI_Policy" { role = "${aws_iam_role.eks_node_role.name}" } +resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKSCSIDriverPolicy" { + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" + role = "${aws_iam_role.eks_node_role.name}" +} + resource "aws_iam_role_policy_attachment" 
"eks-node-AmazonEC2ContainerRegistryReadOnly" { policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" role = "${aws_iam_role.eks_node_role.name}" diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf index 517606a8d..f8b237eeb 100644 --- a/tf_files/aws/modules/eks/cloud.tf +++ b/tf_files/aws/modules/eks/cloud.tf @@ -429,6 +429,11 @@ resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKS_CNI_Policy" { role = "${aws_iam_role.eks_node_role.name}" } +resource "aws_iam_role_policy_attachment" "eks-node-AmazonEKSCSIDriverPolicy" { + policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy" + role = "${aws_iam_role.eks_node_role.name}" +} + resource "aws_iam_role_policy_attachment" "eks-node-AmazonEC2ContainerRegistryReadOnly" { policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" role = "${aws_iam_role.eks_node_role.name}" From 1a7428bcda9c0fa101df80901a0af9b7f8831435 Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Mon, 17 Jul 2023 09:54:49 -0700 Subject: [PATCH 165/362] chore/update jupyter pystata image name (#2286) --- kube/services/jobs/distribute-licenses-job.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml index 5e5209694..aef52c75c 100644 --- a/kube/services/jobs/distribute-licenses-job.yaml +++ b/kube/services/jobs/distribute-licenses-job.yaml @@ -76,7 +76,7 @@ spec: for container in pod.get('spec', {}).get('containers', []): - if "stata-heal" in container['image']: + if "jupyter-pystata-gen3-licensed" in container['image']: existing_license_id = pod.get("metadata", {}).get("annotations", {}).get("stata-license") From e6a321e5a7531b1b56d6598e70cbaf7711728100 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 19 Jul 2023 17:54:09 -0400 Subject: [PATCH 
166/362] =?UTF-8?q?Created=20a=20setup=20script=20to=20ins?= =?UTF-8?q?tall=20argo=20events,=20as=20well=20as=20any=20resourc=E2=80=A6?= =?UTF-8?q?=20(#2287)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Created a setup script to install argo events, as well as any resources used by it. For now, all the resources are for Argo Workflows * Moving from using the public eventbus demo to a local copy, so that updates to the documentation don't break our setup --- gen3/bin/kube-setup-argo-events.sh | 60 +++++++++++ kube/services/argo-events/eventbus.yaml | 11 ++ .../argo-events/workflows/configmap.yaml | 102 ++++++++++++++++++ .../workflows/eventsource-completed.yaml | 20 ++++ .../workflows/eventsource-created.yaml | 18 ++++ .../workflows/eventsource-deleted.yaml | 16 +++ .../argo-events/workflows/job-admin-role.yaml | 12 +++ .../workflows/sensor-completed.yaml | 60 +++++++++++ .../argo-events/workflows/sensor-created.yaml | 76 +++++++++++++ .../argo-events/workflows/sensor-deleted.yaml | 56 ++++++++++ 10 files changed, 431 insertions(+) create mode 100644 gen3/bin/kube-setup-argo-events.sh create mode 100644 kube/services/argo-events/eventbus.yaml create mode 100644 kube/services/argo-events/workflows/configmap.yaml create mode 100644 kube/services/argo-events/workflows/eventsource-completed.yaml create mode 100644 kube/services/argo-events/workflows/eventsource-created.yaml create mode 100644 kube/services/argo-events/workflows/eventsource-deleted.yaml create mode 100644 kube/services/argo-events/workflows/job-admin-role.yaml create mode 100644 kube/services/argo-events/workflows/sensor-completed.yaml create mode 100644 kube/services/argo-events/workflows/sensor-created.yaml create mode 100644 kube/services/argo-events/workflows/sensor-deleted.yaml diff --git a/gen3/bin/kube-setup-argo-events.sh b/gen3/bin/kube-setup-argo-events.sh new file mode 100644 index 000000000..b37c7c010 --- /dev/null +++ 
b/gen3/bin/kube-setup-argo-events.sh @@ -0,0 +1,60 @@ +#!/bin/bash +# Deploy Argo Events, and then optionally deploy resources to create Karpenter resources when a workflow is launched + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" +gen3_load "gen3/lib/g3k_manifest" + +ctx="$(g3kubectl config current-context)" +ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" +create_workflow_resources=false +force=false +override_namespace=false + +for arg in "${@}"; do + if [ "$arg" == "--create-workflow-resources" ]; then + create_workflow_resources=true + elif [ "$arg" == "--force" ]; then + force=true + elif [ "$arg" == "--override-namespace" ]; then + override_namespace=true + else + #Print usage info and exit + gen3_log_info "Usage: gen3 kube-setup-argo-events [--create-workflow-resources] [--force] [--override-namespace]" + exit 1 + fi +done + +#Check if argo-events namespace exists, if not create it +if ! kubectl get namespace argo-events > /dev/null 2>&1; then + gen3_log_info "Creating argo-events namespace, as it was not found" + kubectl create namespace argo-events +fi + +if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then + if (! helm status argo -n argo-events > /dev/null 2>&1 ) || [[ "$force" == true ]]; then + helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) + helm repo update 2> >(grep -v 'This is insecure' >&2) + helm upgrade --install argo argo/argo-events -n argo-events --version "2.1.3" + else + gen3_log_info "argo-events Helm chart already installed. To force reinstall, run with --force" + fi + + if kubectl get statefulset eventbus-default-stan -n argo-events >/dev/null 2>&1; then + gen3_log_info "Detected eventbus installation. To reinstall, please delete the eventbus first. 
You will need to delete any EventSource and Sensors currently in use" + else + kubectl apply -f ${GEN3_HOME}/kube/services/argo-events/eventbus.yaml + fi +else + gen3_log_info "Not running in default namespace, will not install argo-events helm chart" +fi + +if [[ "$create_workflow_resources" == true ]]; then + for file in ${GEN3_HOME}/kube/services/argo-events/workflows/*.yaml; do + kubectl apply -f "$file" + done + + #Creating rolebindings to allow Argo Events to create jobs, and allow those jobs to manage Karpenter resources + kubectl create rolebinding argo-events-job-admin-binding --role=job-admin --serviceaccount=argo-events:default --namespace=argo-events + kubectl create clusterrolebinding karpenter-admin-binding --clusterrole=karpenter-admin --serviceaccount=argo-events:default +fi \ No newline at end of file diff --git a/kube/services/argo-events/eventbus.yaml b/kube/services/argo-events/eventbus.yaml new file mode 100644 index 000000000..a53e3bd9c --- /dev/null +++ b/kube/services/argo-events/eventbus.yaml @@ -0,0 +1,11 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + nats: + native: + # Optional, defaults to 3. If it is < 3, set it to 3, that is the minimal requirement. 
+ replicas: 3 + # Optional, authen strategy, "none" or "token", defaults to "none" + auth: token \ No newline at end of file diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml new file mode 100644 index 000000000..d9ad3d413 --- /dev/null +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -0,0 +1,102 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: karpenter-templates + namespace: argo-events +data: + provisioner.yaml: | + apiVersion: karpenter.sh/v1alpha5 + kind: Provisioner + metadata: + name: workflow-$WORKFLOW_NAME + spec: + requirements: + - key: karpenter.sh/capacity-type + operator: In + values: ["on-demand"] + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: karpenter.k8s.aws/instance-category + operator: In + values: + - c + - t + taints: + - key: role + value: $WORKFLOW_NAME + effect: NoSchedule + labels: + role: $WORKFLOW_NAME + limits: + resources: + cpu: 1000 + providerRef: + name: workflow-$WORKFLOW_NAME + # Allow pods to be rearranged + consolidation: + enabled: true + # Kill nodes after 30 days to ensure they stay up to date + ttlSecondsUntilExpired: 2592000 + + nodetemplate.yaml: | + apiVersion: karpenter.k8s.aws/v1alpha1 + kind: AWSNodeTemplate + metadata: + name: workflow-$WORKFLOW_NAME + spec: + subnetSelector: + karpenter.sh/discovery: vhdcperf + securityGroupSelector: + karpenter.sh/discovery: vhdcperf-workflow + tags: + Environment: vhdcperf + Name: eks-vhdcperf-workflow-karpenter + karpenter.sh/discovery: vhdcperf + workflow-name: $WORKFLOW_NAME + gen3-username: $GEN3_USERNAME + metadataOptions: + httpEndpoint: enabled + httpProtocolIPv6: disabled + httpPutResponseHopLimit: 2 + httpTokens: optional + userData: | + MIME-Version: 1.0 + Content-Type: multipart/mixed; boundary="BOUNDARY" + + --BOUNDARY + Content-Type: text/x-shellscript; charset="us-ascii" + + #!/bin/bash -xe + instanceId=$(curl -s 
http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' + curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + sysctl -w fs.inotify.max_user_watches=12000 + + sudo yum update -y + sudo yum install -y dracut-fips openssl >> /opt/fips-install.log + sudo dracut -f + # configure grub + sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" + + power_state: + delay: now + mode: reboot + message: Powering off + timeout: 2 + condition: true + + --BOUNDARY-- + blockDeviceMappings: + - deviceName: /dev/xvda + ebs: + volumeSize: 50Gi + volumeType: gp2 + encrypted: true + deleteOnTermination: true diff --git a/kube/services/argo-events/workflows/eventsource-completed.yaml b/kube/services/argo-events/workflows/eventsource-completed.yaml new file mode 100644 index 000000000..b3c7488fa --- /dev/null +++ b/kube/services/argo-events/workflows/eventsource-completed.yaml @@ -0,0 +1,20 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: argo-workflow-ended-source + namespace: argo-events +spec: + template: + serviceAccountName: default + resource: + workflow-ended: + namespace: argo + group: argoproj.io + version: v1alpha1 + resource: workflows + eventTypes: + - UPDATE + filter: + labels: + - key: workflows.argoproj.io/completed + value: "true" diff --git a/kube/services/argo-events/workflows/eventsource-created.yaml b/kube/services/argo-events/workflows/eventsource-created.yaml new file mode 100644 index 000000000..9abf78e19 --- /dev/null +++ b/kube/services/argo-events/workflows/eventsource-created.yaml @@ -0,0 +1,18 @@ +apiVersion: 
argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: argo-workflow-created-source + namespace: argo-events +spec: + template: + serviceAccountName: default + resource: + workflow-created: + namespace: argo + group: argoproj.io + version: v1alpha1 + resource: workflows + eventTypes: + - ADD + filter: + afterStart: false diff --git a/kube/services/argo-events/workflows/eventsource-deleted.yaml b/kube/services/argo-events/workflows/eventsource-deleted.yaml new file mode 100644 index 000000000..54a00464e --- /dev/null +++ b/kube/services/argo-events/workflows/eventsource-deleted.yaml @@ -0,0 +1,16 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: argo-workflow-deleted-source + namespace: argo-events +spec: + template: + serviceAccountName: default + resource: + workflow-deleted: + namespace: argo + group: argoproj.io + version: v1alpha1 + resource: workflows + eventTypes: + - DELETE diff --git a/kube/services/argo-events/workflows/job-admin-role.yaml b/kube/services/argo-events/workflows/job-admin-role.yaml new file mode 100644 index 000000000..462652c97 --- /dev/null +++ b/kube/services/argo-events/workflows/job-admin-role.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: job-admin + namespace: argo-events +rules: + - apiGroups: + - batch + resources: + - jobs + verbs: + - '*' diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml new file mode 100644 index 000000000..e92ad6918 --- /dev/null +++ b/kube/services/argo-events/workflows/sensor-completed.yaml @@ -0,0 +1,60 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: argo-workflow-ended-sensor + namespace: argo-events +spec: + template: + serviceAccountName: default + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: argo-workflow-ended + eventSourceName: argo-workflow-ended-source + eventName: workflow-ended + 
triggers: + - template: + name: log-event + log: + intervalSeconds: 10 + - template: + name: argo-workflow + k8s: + operation: create + parameters: + - src: + dependencyName: argo-workflow-ended + dataKey: body.metadata.name + dest: spec.template.spec.containers.0.env.0.value + source: + resource: + apiVersion: batch/v1 + kind: Job + metadata: + generateName: delete-karpenter-resources- + namespace: argo-events + labels: + workflow: "" + spec: + ttlSecondsAfterFinished: 900 + completions: 1 + parallelism: 1 + template: + spec: + restartPolicy: Never + containers: + - name: karpenter-resource-creator + image: quay.io/cdis/awshelper + command: ["/bin/sh"] + args: + - "-c" + - | + kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + kubectl delete provisioners workflow-$WORKFLOW_NAME + env: + - name: WORKFLOW_NAME + value: "" + backoffLimit: 0 + diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml new file mode 100644 index 000000000..27cbc5643 --- /dev/null +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -0,0 +1,76 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: argo-workflow-created-sensor + namespace: argo-events +spec: + template: + serviceAccountName: default + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: workflow-created-event + eventSourceName: argo-workflow-created-source + eventName: workflow-created + triggers: + - template: + name: log-event + log: + intervalSeconds: 10 + - template: + name: argo-workflow + k8s: + operation: create + parameters: + - src: + dependencyName: workflow-created-event + dataKey: body.metadata.name + dest: spec.template.spec.containers.0.env.0.value + - src: + dependencyName: workflow-created-event + dataKey: body.metadata.name + dest: metadata.labels.workflow + - src: + dependencyName: workflow-created-event + dataKey: body.metadata.labels.gen3username + dest: 
spec.template.spec.containers.0.env.1.value + source: + resource: + apiVersion: batch/v1 + kind: Job + metadata: + generateName: create-karpenter-resources- + namespace: argo-events + labels: + workflow: "" + spec: + completions: 1 + ttlSecondsAfterFinished: 900 + parallelism: 1 + template: + spec: + restartPolicy: Never + containers: + - name: karpenter-resource-creator + image: quay.io/cdis/awshelper + command: ["/bin/sh"] + args: + - "-c" + - | + for file in /home/manifests/*.yaml; do envsubst < $file | kubectl apply -f -; done + env: + - name: WORKFLOW_NAME + value: "" + - name: GEN3_USERNAME + value: "" + volumeMounts: + - name: karpenter-templates-volume + mountPath: /home/manifests + volumes: + - name: karpenter-templates-volume + configMap: + name: karpenter-templates + backoffLimit: 0 + diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml new file mode 100644 index 000000000..61e2235d7 --- /dev/null +++ b/kube/services/argo-events/workflows/sensor-deleted.yaml @@ -0,0 +1,56 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: argo-workflow-deleted-sensor + namespace: argo-events +spec: + template: + serviceAccountName: default + dependencies: + - name: argo-workflow-deleted + eventSourceName: argo-workflow-deleted-source + eventName: workflow-deleted + triggers: + - template: + name: log-event + log: + intervalSeconds: 10 + - template: + name: argo-workflow + k8s: + operation: create + parameters: + - src: + dependencyName: argo-workflow-deleted + dataKey: body.metadata.name + dest: spec.template.spec.containers.0.env.0.value + source: + resource: + apiVersion: batch/v1 + kind: Job + metadata: + generateName: delete-karpenter-resources- + namespace: argo-events + labels: + workflow: "" + spec: + ttlSecondsAfterFinished: 900 + completions: 1 + parallelism: 1 + template: + spec: + restartPolicy: Never + containers: + - name: karpenter-resource-creator + image: 
quay.io/cdis/awshelper + command: ["/bin/sh"] + args: + - "-c" + - | + kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + kubectl delete provisioners workflow-$WORKFLOW_NAME + env: + - name: WORKFLOW_NAME + value: "" + backoffLimit: 0 + From ec3d3b1a5ae419f5e7df8e00c76a0514afd87b40 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Fri, 21 Jul 2023 12:57:21 -0500 Subject: [PATCH 167/362] Feat/cohort middleware (#2293) * feat: add kube-setup-cohort-middleware script * feat: update with keeping secret in Gen3Secrets * feat: cleanup and Gen3Secrets --- gen3/bin/kube-setup-cohort-middleware.sh | 62 +++++++++++++++++++ .../cohort-middleware-deploy.yaml | 6 +- 2 files changed, 65 insertions(+), 3 deletions(-) create mode 100644 gen3/bin/kube-setup-cohort-middleware.sh diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh new file mode 100644 index 000000000..91b414849 --- /dev/null +++ b/gen3/bin/kube-setup-cohort-middleware.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# Deploy cohort-middleware into existing commons + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +setup_secrets() { + gen3_log_info "Deploying secrets for cohort-middleware" + # subshell + + ( + if ! dbcreds="$(gen3 db creds ohdsi)"; then + gen3_log_err "unable to find db creds for ohdsi service (was Atlas deployed?)" + return 1 + fi + + mkdir -p $(gen3_secrets_folder)/g3auto/cohort-middleware + credsFile="$(gen3_secrets_folder)/g3auto/cohort-middleware/development.yaml" + + if [[ (! 
-f "$credsFile") && -z "$JENKINS_HOME" ]]; then + DB_NAME=$(jq -r ".db_database" <<< "$dbcreds") + export DB_NAME + DB_USER=$(jq -r ".db_username" <<< "$dbcreds") + export DB_USER + DB_PASS=$(jq -r ".db_password" <<< "$dbcreds") + export DB_PASS + DB_HOST=$(jq -r ".db_host" <<< "$dbcreds") + export DB_HOST + + cat - > "$credsFile" < Date: Fri, 21 Jul 2023 13:04:17 -0500 Subject: [PATCH 168/362] Orthanc w/ S3; OHIF Viewer v3 (#2276) * feat: Orthanc with S3 support * feat: back to Postgres storage for Orthanc * feat: separate Orthanc & DICOM Viewer deployment for S3-based storage * feat: proper name * feat: stop rewrite for ohif-viewer * feat: ok, this is needed * feat: new secrets for orthanc * feat: update for secret name * feat: adding extra env variables * feat: correct env variable * feat: extra plugins configuration * feat: add custom config for ohif-viewer * feat: change ohif-viewer nginx port to 8080 * fix: change readiness/liveness port to 8080 for ohif-viewer * fix: PUBLIC_URL env var for ohif-viewer * fix: need to check how it works * feat: enable s3 for orthanc * feat: make Orthanc use authz * fix: the configuration should have been for Orthanc * feat: committing the secret changes * fix: ohif-viewer authz * feat: treat /system for Orthanc separately * feat: use basic auth for public user in deployment --- gen3/bin/kube-setup-dicom.sh | 106 ++++++++++++++++++ .../ohif-viewer/ohif-viewer-deploy.yaml | 94 ++++++++++++++++ .../ohif-viewer/ohif-viewer-service.yaml | 16 +++ kube/services/orthanc/orthanc-deploy.yaml | 103 +++++++++++++++++ kube/services/orthanc/orthanc-service.yaml | 16 +++ .../gen3.nginx.conf/ohif-viewer-service.conf | 16 +++ .../gen3.nginx.conf/orthanc-service.conf | 26 +++++ 7 files changed, 377 insertions(+) create mode 100644 gen3/bin/kube-setup-dicom.sh create mode 100644 kube/services/ohif-viewer/ohif-viewer-deploy.yaml create mode 100644 kube/services/ohif-viewer/ohif-viewer-service.yaml create mode 100644 
kube/services/orthanc/orthanc-deploy.yaml create mode 100644 kube/services/orthanc/orthanc-service.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf create mode 100644 kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf diff --git a/gen3/bin/kube-setup-dicom.sh b/gen3/bin/kube-setup-dicom.sh new file mode 100644 index 000000000..85114f33f --- /dev/null +++ b/gen3/bin/kube-setup-dicom.sh @@ -0,0 +1,106 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +hostname=$(gen3 api hostname) +export hostname +namespace=$(gen3 api namespace) +export namespace + +# Deploy the dicom-server service +setup_database_and_config() { + gen3_log_info "setting up dicom-server DB and config" + + if g3kubectl describe secret orthanc-s3-g3auto > /dev/null 2>&1; then + gen3_log_info "orthanc-s3-g3auto secret already configured" + return 0 + fi + if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + gen3_log_err "skipping db setup in non-adminvm environment" + return 0 + fi + + # Setup config files that dicom-server consumes + local secretsFolder + secretsFolder="$(gen3_secrets_folder)/g3auto/orthanc-s3" + if [[ ! -f "$secretsFolder/orthanc_config_overwrites.json" ]]; then + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + if ! gen3 db setup orthanc-s3; then + gen3_log_err "Failed setting up orthanc database for dicom-server" + return 1 + fi + fi + + ref_hostname="${hostname//\./-}" + bucketname="${ref_hostname}-orthanc-storage" + awsuser="${ref_hostname}-orthanc" + + if [[ ! 
-f "$secretsFolder/s3creds.json" ]]; then + gen3 s3 create "${bucketname}" + gen3 awsuser create "${awsuser}" + gen3 s3 attach-bucket-policy "${bucketname}" --read-write --user-name "${awsuser}" + + user=$(gen3 secrets decode "${awsuser}"-g3auto awsusercreds.json) + key_id=$(jq -r .id <<< "$user") + access_key=$(jq -r .secret <<< "$user") + + cat - > "$secretsFolder/s3creds.json" < "$secretsFolder/orthanc_config_overwrites.json" < Date: Fri, 21 Jul 2023 13:04:26 -0500 Subject: [PATCH 169/362] feat: increase proxy buffer size for headers (#2285) * feat: increase proxy buffer size for headers * feat: add fastcgi buffer size * feat: disable buffering * Update uwsgi.conf * Update uwsgi.conf * Update uwsgi.conf * Update uwsgi.conf * Update uwsgi.conf * Update uwsgi.conf * Update uwsgi.conf * Update uwsgi.conf --- Docker/python-nginx/python3.9-buster/uwsgi.conf | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/Docker/python-nginx/python3.9-buster/uwsgi.conf b/Docker/python-nginx/python3.9-buster/uwsgi.conf index 97c53335d..7bafdb48d 100644 --- a/Docker/python-nginx/python3.9-buster/uwsgi.conf +++ b/Docker/python-nginx/python3.9-buster/uwsgi.conf @@ -15,7 +15,19 @@ server { server { listen 80; - large_client_header_buffers 4 64k; + proxy_buffer_size 16k; + proxy_buffers 4 16k; + proxy_busy_buffers_size 32k; + + uwsgi_buffer_size 16k; + uwsgi_buffers 4 16k; + uwsgi_busy_buffers_size 32k; + + client_header_buffer_size 32k; + large_client_header_buffers 4 16k; + + proxy_buffering off; + uwsgi_buffering off; location / { uwsgi_param REMOTE_ADDR $http_x_forwarded_for if_not_empty; From 059b9e903b5f8ff9388e923dc54a40ae104af36a Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 21 Jul 2023 14:59:33 -0600 Subject: [PATCH 170/362] pinning the pyyaml version due to bug. 
(#2292) * pinning the pyyaml version due to bug * Update Docker/awshelper/Dockerfile Co-authored-by: burtonk <117617405+k-burt-uch@users.noreply.github.com> --------- Co-authored-by: burtonk <117617405+k-burt-uch@users.noreply.github.com> --- Docker/awshelper/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Docker/awshelper/Dockerfile b/Docker/awshelper/Dockerfile index 231870670..f3dd7b60e 100644 --- a/Docker/awshelper/Dockerfile +++ b/Docker/awshelper/Dockerfile @@ -38,6 +38,9 @@ RUN apt-get update && apt-get upgrade -y \ wget \ gettext-base +#can remove once https://github.com/yaml/pyyaml/issues/724 is solved +RUN pip install pyyaml==5.3.1 + RUN python3 -m pip install --upgrade pip \ && python3 -m pip install --upgrade setuptools \ && python3 -m pip install -U crcmod \ From bb6eb13fe59e62890ba0c55bb72d7a1c7c4c957c Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 21 Jul 2023 15:28:11 -0700 Subject: [PATCH 171/362] Add new new envs to CI pool (#2294) Adding jenkins-new-2, jenkins-new-3, jenkins-new-4 --- files/scripts/ci-env-pool-reset.sh | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index 1ceb0ccb5..362cfbfd5 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -30,13 +30,16 @@ cat - > jenkins-envs-services.txt < jenkins-envs-releases.txt < Date: Wed, 26 Jul 2023 14:40:30 -0700 Subject: [PATCH 172/362] chore/read gen3 license from g3auto secret (#2289) * chore/read gen3 license from g3auto secret --- .../jobs/distribute-licenses-job.yaml | 22 ++++++------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/kube/services/jobs/distribute-licenses-job.yaml b/kube/services/jobs/distribute-licenses-job.yaml index aef52c75c..1c2ad4284 100644 --- a/kube/services/jobs/distribute-licenses-job.yaml +++ b/kube/services/jobs/distribute-licenses-job.yaml @@ -48,10 +48,11 @@ spec: configMapKeyRef: name: 
manifest-hatchery key: "user-namespace" - - name: GEN3_LICENSE_SECRET_NAME - value: stata-workspace-gen3-license - - name: GEN3_LICENSE_KEY - value: licenseSecrets + - name: GEN3_STATA_LICENSE + valueFrom: + secretKeyRef: + name: stata-workspace-gen3-license-g3auto + key: "stata_license.txt" command: ["python"] args: - "-c" @@ -100,19 +101,10 @@ spec: used_licenses.sort() print(f"Licenses currently in use: {used_licenses}") - # The license keys should be stored in a kubernetes secret. + # The Gen3 Stata license strings should be stored in a kubernetes secret using g3auto. # The format of the secret is one license string per line. # The license strings are generated with 'stinit' using the information in a license PDF. - # The secret can be generated from a temporary file with a kubectl command, eg - # kubectl create secret generic GEN3_LICENSE_SECRET_NAME --from-file=GEN3_LICENSE_KEY=/path/to/file.lic - - # Get license from kubernetes secret - print("Ready to read secret") - secret_name = os.environ['GEN3_LICENSE_SECRET_NAME'] - secret_key = os.environ['GEN3_LICENSE_KEY'] - license_secrets = os.popen( - f"kubectl get secret {secret_name} --template={{{{.data.{secret_key}}}}} | base64 -d" - ).read() + license_secrets = os.environ['GEN3_STATA_LICENSE'] license_secrets = license_secrets.strip() licenses = license_secrets.split("\n") From bdafe3a998577b4adeb61a9be60110a7f6711d02 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Sun, 30 Jul 2023 09:43:44 -0700 Subject: [PATCH 173/362] Update jenkins to latest versions (#2297) * Update jenkins to latest versions * remove python-pip * remove duplicate install of python and awscli * fix * fix * fix * fix * build jenkins images for amd64 alone * fix * fix * fix * fix --- .../workflows/image_build_push_jenkins.yaml | 4 ++ .secrets.baseline | 10 ++--- Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 39 +++++++------------ Docker/jenkins/Jenkins-Worker/Dockerfile | 31 +++++++-------- Docker/jenkins/Jenkins/Dockerfile | 29 
++++++-------- Docker/jenkins/Jenkins2/Dockerfile | 26 ++++++------- 6 files changed, 62 insertions(+), 77 deletions(-) diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml index ffea50ace..2d85aedf1 100644 --- a/.github/workflows/image_build_push_jenkins.yaml +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -14,6 +14,7 @@ jobs: DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins" OVERRIDE_REPO_NAME: "jenkins" USE_QUAY_ONLY: true + BUILD_PLATFORMS: "linux/amd64" secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} @@ -27,6 +28,7 @@ jobs: DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins2" OVERRIDE_REPO_NAME: "jenkins2" USE_QUAY_ONLY: true + BUILD_PLATFORMS: "linux/amd64" secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} @@ -40,6 +42,7 @@ jobs: DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-CI-Worker" OVERRIDE_REPO_NAME: "gen3-ci-worker" USE_QUAY_ONLY: true + BUILD_PLATFORMS: "linux/amd64" secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} @@ -53,6 +56,7 @@ jobs: DOCKERFILE_BUILD_CONTEXT: "./Docker/jenkins/Jenkins-Worker" OVERRIDE_REPO_NAME: "gen3-qa-worker" USE_QUAY_ONLY: true + BUILD_PLATFORMS: "linux/amd64" secrets: ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} diff --git a/.secrets.baseline b/.secrets.baseline index 621c0a009..791bab52e 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-06-06T18:46:35Z", + "generated_at": "2023-07-26T18:54:08Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -78,7 +78,7 @@ { "hashed_secret": 
"10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 122, + "line_number": 113, "type": "Secret Keyword" } ], @@ -86,7 +86,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 136, + "line_number": 135, "type": "Secret Keyword" } ], @@ -94,7 +94,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 110, + "line_number": 105, "type": "Secret Keyword" } ], @@ -102,7 +102,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 110, + "line_number": 106, "type": "Secret Keyword" } ], diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index afb1fca9f..8c6c78325 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -1,12 +1,9 @@ -FROM jenkins/jnlp-slave:4.13.3-1-jdk11 +FROM jenkins/inbound-agent:jdk11 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget - RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ @@ -16,7 +13,6 @@ RUN set -xe && apt-get update \ libffi-dev \ libssl-dev \ libghc-regex-pcre-dev \ - linux-headers-amd64 \ libcurl4-openssl-dev \ libncurses5-dev \ libncursesw5-dev \ @@ -27,12 +23,12 @@ RUN set -xe && apt-get update \ libbz2-dev \ libexpat1-dev \ liblzma-dev \ - python-virtualenv \ lua5.3 \ r-base \ software-properties-common \ sudo \ tk-dev \ + wget \ zlib1g-dev \ zsh \ ca-certificates-java \ @@ -58,30 +54,25 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ # # install docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * 
https://docs.docker.com/compose/install/#install-compose # -RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ - && add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" \ - && apt-get update \ - && apt-get install -y docker-ce \ - && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ - && chmod a+rx /usr/local/bin/docker-compose +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce # install nodejs RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - RUN apt-get update && apt-get install -y nodejs -# add psql: https://www.postgresql.org/download/linux/debian/ -RUN DISTRO="$(lsb_release -c -s)" \ - && echo "deb http://apt.postgresql.org/pub/repos/apt/ ${DISTRO}-pgdg main" > /etc/apt/sources.list.d/pgdg.list \ - && wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ - && apt-get update \ - && apt-get install -y postgresql-client-13 libpq-dev \ - && rm -rf /var/lib/apt/lists/* +# Install postgres 13 client +RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \ + echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | tee /etc/apt/sources.list.d/pgdg.list && \ + apt-get update && \ + apt-get install -y postgresql-client-13 # Copy sh script responsible for installing Python COPY 
install-python3.8.sh /root/tmp/install-python3.8.sh diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index 7b1d460cc..61216733a 100644 --- a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -1,16 +1,9 @@ -FROM jenkins/jnlp-slave:4.13.3-1-jdk11 +FROM jenkins/inbound-agent:jdk11 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python and pip and aws cli -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python-pip python3 python3-pip build-essential libgit2-dev zip unzip less vim gettext-base wget -RUN set -xe && python -m pip install awscli --upgrade && python -m pip install pytest --upgrade && python -m pip install PyYAML --upgrade && python -m pip install lxml --upgrade -RUN set -xe && python3 -m pip install pytest --upgrade && python3 -m pip install PyYAML --upgrade -RUN set -xe && python -m pip install yq --upgrade && python3 -m pip install yq --upgrade -RUN set -xe && python3 -m pip install pandas --upgrade - RUN apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ @@ -35,6 +28,7 @@ RUN apt-get update \ lua5.3 \ software-properties-common \ sudo \ + wget \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua # install Ruby. 
@@ -45,11 +39,17 @@ RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sou && apt-get update \ && apt-get -t=buster-backports -y install git=1:2.30.* -# install k6 to run load tests -RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 \ - && echo "deb https://dl.k6.io/deb stable main" | tee /etc/apt/sources.list.d/k6.list \ - && apt-get update \ - && apt-get install k6 +# +# install docker tools: +# +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce # install xk6-browser RUN cd /opt && wget --quiet https://github.com/grafana/xk6-browser/releases/download/v0.3.0/xk6-browser-v0.3.0-linux-amd64.tar.gz \ @@ -71,15 +71,13 @@ RUN wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && c # # install docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * https://docs.docker.com/compose/install/#install-compose # RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ && /usr/bin/add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian \ $(lsb_release -c -s) \ stable" \ && apt-get update \ - && apt-get install -y docker-ce \ + && apt-get install -y docker-ce-cli \ && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ && chmod a+rx /usr/local/bin/docker-compose @@ -118,6 +116,7 @@ RUN chmod +x 
/root/tmp/install-python3.9.sh; sync && \ bash /root/tmp/install-python3.9.sh && \ rm -rf /root/tmp/install-python3.9.sh && \ unlink /usr/bin/python3 && \ + ln -s /usr/local/bin/python3.9 /usr/bin/python && \ ln -s /usr/local/bin/python3.9 /usr/bin/python3 RUN env diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index a872ee1dd..e6cf065db 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,12 +1,9 @@ -FROM jenkins/jenkins:2.375 +FROM jenkins/jenkins:2.415-jdk11 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget - RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ @@ -30,6 +27,7 @@ RUN set -xe && apt-get update \ software-properties-common \ sudo \ tk-dev \ + wget \ zlib1g-dev \ zsh \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua @@ -45,18 +43,15 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ # # install docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * https://docs.docker.com/compose/install/#install-compose # -RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ - && add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" \ - && apt-get update \ - && apt-get install -y docker-ce \ - && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ - && chmod a+rx /usr/local/bin/docker-compose +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + 
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce # install nodejs RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - @@ -81,8 +76,8 @@ RUN chmod +x /root/tmp/install-python3.8.sh; sync && \ ln -s /Python-3.8.0/python /usr/bin/python3 # Fix shebang for lsb_release -RUN sed -i 's/python3/python3.5/' /usr/bin/lsb_release && \ - sed -i 's/python3/python3.5/' /usr/bin/add-apt-repository +RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ + sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. RUN set -xe && python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade && python3 -m pip install pytest --upgrade && python3 -m pip install poetry && python3 -m pip install PyYAML --upgrade && python3 -m pip install lxml --upgrade && python3 -m pip install yq --upgrade diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index 59cb5672e..45f8fb373 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ b/Docker/jenkins/Jenkins2/Dockerfile @@ -1,12 +1,9 @@ -FROM jenkins/jenkins:2.375 +FROM jenkins/jenkins:2.415-jdk11 USER root ENV DEBIAN_FRONTEND=noninteractive -# install python -RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils python python-setuptools python-dev python3 python3-pip python3-venv build-essential zip unzip jq less vim gettext-base wget - RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ @@ -30,6 +27,7 @@ RUN set -xe && apt-get update \ software-properties-common \ sudo \ tk-dev \ + wget \ zlib1g-dev \ zsh \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua @@ -45,18 +43,16 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ # # install 
docker tools: -# * https://docs.docker.com/install/linux/docker-ce/debian/#install-docker-ce-1 -# * https://docs.docker.com/compose/install/#install-compose # -RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - \ - && add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/debian \ - $(lsb_release -cs) \ - stable" \ - && apt-get update \ - && apt-get install -y docker-ce \ - && curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ - && chmod a+rx /usr/local/bin/docker-compose +RUN sudo install -m 0755 -d /etc/apt/keyrings \ + && curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg \ + && sudo chmod a+r /etc/apt/keyrings/docker.gpg \ + && echo \ + "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \ + "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ + && apt-get update && apt-get install -y docker-ce + # install nodejs RUN curl -sL https://deb.nodesource.com/setup_18.x | bash - From f128dc892381cc403e5d40cb564259790ed984ad Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 31 Jul 2023 10:33:19 -0600 Subject: [PATCH 174/362] removing squid cache to see if that resolves self-signed certificate errors we have been seeing with our Java applications. 
(#2298) --- flavors/squid_auto/startup_configs/squid.conf | 1 - 1 file changed, 1 deletion(-) diff --git a/flavors/squid_auto/startup_configs/squid.conf b/flavors/squid_auto/startup_configs/squid.conf index 653026200..b1e44810a 100644 --- a/flavors/squid_auto/startup_configs/squid.conf +++ b/flavors/squid_auto/startup_configs/squid.conf @@ -56,7 +56,6 @@ http_access deny all persistent_request_timeout 5 seconds -cache_dir ufs /var/cache/squid 100 16 256 pid_filename /var/run/squid/squid.pid # vi:syntax=squid.conf From 31a5277d29f82c0974788aefaa4ba1520ea263c6 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Mon, 31 Jul 2023 10:14:22 -0700 Subject: [PATCH 175/362] Add missing jenkins dependencies (#2299) --- .secrets.baseline | 10 +++++----- Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 2 ++ Docker/jenkins/Jenkins-Worker/Dockerfile | 2 ++ Docker/jenkins/Jenkins/Dockerfile | 2 ++ Docker/jenkins/Jenkins2/Dockerfile | 2 ++ 5 files changed, 13 insertions(+), 5 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 791bab52e..8e671afaa 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-07-26T18:54:08Z", + "generated_at": "2023-07-31T16:54:24Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -78,7 +78,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 113, + "line_number": 115, "type": "Secret Keyword" } ], @@ -86,7 +86,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 135, + "line_number": 137, "type": "Secret Keyword" } ], @@ -94,7 +94,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 105, + "line_number": 107, "type": "Secret Keyword" } ], @@ -102,7 +102,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 106, + "line_number": 108, "type": 
"Secret Keyword" } ], diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 8c6c78325..671cd2e02 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -4,6 +4,8 @@ USER root ENV DEBIAN_FRONTEND=noninteractive +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base + RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index 61216733a..088186b04 100644 --- a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -4,6 +4,8 @@ USER root ENV DEBIAN_FRONTEND=noninteractive +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base + RUN apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index e6cf065db..ae39ac574 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -4,6 +4,8 @@ USER root ENV DEBIAN_FRONTEND=noninteractive +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base + RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index 45f8fb373..9976a07c2 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ b/Docker/jenkins/Jenkins2/Dockerfile @@ -4,6 +4,8 @@ USER root ENV DEBIAN_FRONTEND=noninteractive +RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-essential zip unzip jq less vim gettext-base + RUN set -xe && apt-get update \ && apt-get install -y lsb-release \ apt-transport-https \ From 
5c0866a3a70f97d583c529e2aa656abb21d3adf6 Mon Sep 17 00:00:00 2001 From: burtonk <117617405+k-burt-uch@users.noreply.github.com> Date: Mon, 31 Jul 2023 12:15:26 -0500 Subject: [PATCH 176/362] Update squid_authorized_keys_user (#2291) Adding Kyle Burton's public key. --- files/authorized_keys/squid_authorized_keys_user | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/authorized_keys/squid_authorized_keys_user b/files/authorized_keys/squid_authorized_keys_user index 46b43a030..4b35fecd9 100644 --- a/files/authorized_keys/squid_authorized_keys_user +++ b/files/authorized_keys/squid_authorized_keys_user @@ -18,4 +18,5 @@ ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKJR5N5VIU9qdSfCtlskzuQ7A5kNn8YPeXsoKq0HhY ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC3vyd6a7tsANi149ylPQYS8Gsp/SxJyhdK/j6arv77KbM0EIzzUiclFLnMKcqUQ263FrPyx3a3UP80R77ayCnwcEHrxlJrYfyFUva8vtmI9mu8VE7oXvuR/jcOyXM9NosxyYacL/p6W5X4r8tqo/gJFjmls1YRfu3JPlTgTT0VzGJu+B6rLEsw53c37VVzSaCtu/jBOjyxI1/UaNg1cd+hcfoQxJ9zSDqqE7ZUNOc3zHP+1AGYCQ/CJsNrDl2OkppIdC9He5jgjLhyD7yvyarI+oF05oHknol/K1hXK+yxIkF2Ou5krfjw7TMBvD+JbQVb35vL9acXFF20+lHLRLbobPU/6ZZTup3q7IRm5OWaL2CJtYZbJvicKW0Ep+vTzaiQjK71L6UxcIvnzvbP9Dnatv1GBMMDaQxAa4Lood8NG2ty1yfLN972akGqBlwJASXMRd/ogzxv2KSH9w6HHYoc2WpDhUtNHmjwX1FSLYPW3qx5ICMW6j9gR2u1tG4Ohzp1CmYVElnRHbnBrTkLde65Vqedk2tQy8fcopH59ZASIuR4GbhCb2SiNkr1PHEvfhLMzg/UCSnnhX9vUNhkPjJRy/bdL3pOt/77lpIQUqQBArOiZmfG8OD0q4+3Nr+c9v5bSSvynjirlKk+wb8sKyOoSAXdFeovL/A0BUKUjCtsXQ== dev@test.com ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQChK/8JjpUeWcF/1Ea2M4mSbLz1tOfpq74xD2USxE54kx7VoN1G7ylV76yqSIeRq1e7PPBEg5ZD1aXUJnlI32RwLJ5kaHnoB82Ta+Fv1B/vVoHCObcALfiHPpwPf1kM2liWEB0EhYcz1OUv3YQriPqjiRoWfnbw60GIyzhpWZhKRq0zlISOaTYdV9kafX+N7M6/gSU0632TgUwwsStYrffEleyrC/Lh+4UaESozWoPFiZLl2eMCKfZNFBB99HTFifImW2yC6Ag1QhCd1i3NpfiYuaSDH7WR3slPRSd8DiUAwGC2DkIuWPp3bhaAv2V4mtLIBAaTZsINIACB2+w7yf9yvCGtdobCmp4AA7ik9rEkRLk/Jff0YBHd6Z4qyIuRht3ZeWXIYSK1zOlPfs4lPUgvbjlPgMVFV2CrvOTnS+YZdW+8AklwRC3HDPD8wv3H/eGxl3K0vHWTBbTb774nVNfRDw81wcezCXFNUn4p2he7fgKcxs/rnMsYUcY8JJNR7Iz+NNIGUCom6HFwCMQdangFMHUW5TxxrlJcwVRaAns1M6g3ilYO+uvN/XsgCpZWYWnv5rBk8qz6dBM7gpc8tSr6Hvr7/vlghF3jpL+mQiW+7vUL+UZrUFNyoacUcQ+NuxKacHtHQKuRDyWofp+CB2b2a744F3mpkxx74HIkiZ72mQ== dev@test.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDDTX+pQvGrQVXmHGDhBP+632tgbb1j+BQWkrsUkDJGzwFiGs4dgqDs2eC+aDVq2LFz4xj0SgussFAKciB45OgmSZKX5yUE3Oo/lqov0Bb5f85iBHGv/X/JiuIYaq8GJklVyyo1sfKLUK1SOal6bE1WofezyTyDsdrHjIU50quzW7nB1CmL6rekIv/+df/seut4b3De1d2uX5WGGtcvQ5yTSgBW5aabMAJ2V9WlP/6Dw040Kq0MyKV01cIJ1HAjFhP58gbf3Eytz3AqqJVT6u0QroxhesCgKTyGcAyYy3airI/N0FHdC5oABVEJ6dKyy1rYvOchuxYeVMVVWn0vS7mZ+vP7dqaDmgEUU2qmTPBQZV2xBWCdpfyUYYARW2JzlEaySbmA+yoxFBsquunVbIgUGNEUbxefsFdM3k5pS6I1uuEM0ATYH5iNz84nKKCcksGlib0i/pEtra6N/mFF7yjHYBRb/E/VCZig0gKezDJWu/DO0emJA+kdQpqp48U+qFrSWkuiO0dCQYl3VCVo8vedgMGPjr8MbUjU7o8W1+DYyjFM8HYMknRNdVAqAoK+cedw9mAWVGpKFrl61caGTFck0634nAVFUmfGTh9XRaZeFdDnivxnqP837gcsdKnEGYnkrxWap97XeXzK0P0Svy1zBfUQyzU5vrHfHt2H7ILDMw== prodv1-usersync-sftp -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com \ No newline at end of file +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDaO/doqHANcTZFEqZOoy9kKgbxu0d/cS1nEINlFcoQ/jnCG7huznWnWiYgnlkS6/Op9VrDp6qG/UBDye2mTvAh2FHPsOzSGvgml3dPYB5fy6G/xoXd7NJnIxttwFUvk4GuLZ40s24WCcXoFGJ2vaSAVYr0q6lmqOqk6jp1/lNj4+QFD4mcH2//jTscSFNseRII2NECu+PnnWAuYFOIHH1IODOvInEivUvN6VBX410D7iD7cEdhgiYitFZH6Cp6ubWG7OUKdZYv0067eO6HDDzl7y+BBUf3DF6Lr8gqtGXVqmAB9UqeBJ8pP3pNWKbgAa8sHvS8JxElCIc+4EM5dTI2OrDYKiuCTPZEC14WEFZLKqH7tjQFuZe0jfVRtoFNmKWClCgkJDWpyIkdR+qHcnOwlYkUVN3B02WVu4kTfox2ZUz65tLspJNAxAjYVrI7+c6LTQHSJwMcAMYcehR3vuqAfKE7xM6ReNxRQXsWaasdJgT2IJKj7vHu/G9GVycjiheg3zakJ9rr+63I68XlHNnTtfjIl/jgIHgcU18ggbwkwjL3xk39YttutlAaNAGUYCsopn/HdK8A86KvTCwHGEKtubgEHmv1oRAOooVaNes1oko2y9Saaqee52bsvwfeTLgxXB43d9GOWLoyBlgprDiufssFHoiJKQlgrqEwtg+vYQ== giangbui0816@gmail.com +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDTpJ2l8nfOvhJ4Y3pjadFU69nfJBRuE0BaHE22LK9qflFWdhGW+T/x8Qy9406DFXCh6KED+q9lC+N4nR92AfgFNaBmkXZkzWLoXcqO1IWRexHwTqAUcrtLjpx5wNdCr3+vv9hWhXtvYg8ewnrZc+WxYde4EUmXbhzPXbg0SkBXTr6bpYhs6inyttfBeJNxbeydrW7cmhFiAdOkm03o3AXdH86PNlWVfVHy8OHHzf4fbvlJlOx7OeB+wOyQUr3DW+IWBLQFJk4uyagn/ECV9OIQpxoJFTQjcSrJ6v/GqlY5PImM6YxL8NlZu46CDIxagaZkum+iJ8dtPYr6tJuLiP5Ny0Gsl1X5DoKlstgyqqPNYTnZVS4GSS5Hyxm6HmodZ78OR5+vAoyWKZ3unXU5Dbkz0Qxq9VtrGo2xd0M+dDi/7YazRpLL0tc39w48Wl7KD3jFzoesZp1JHeEGLdGXlGCw8AM1FT0WDf28ShTRds6uWPGvMtM3XkVDPMLFwroKv1RCErmqLYod4HOMuwlmdRvtDGYb3NYsliOnHPiT9nhu2J6KmT1jj8uFOLyTaJCArtBqIsXscP3R4o0wBlQl3FniMdiK7ESkv8DUaOr1Co+/3wX9n/p/BW5bxuq1R9HpNyKsrALyNJUkquVT+5aPcNKXvmAeHAw/D0TYzy6ZKBpnDw== kyuleburton@Kyules-MacBook-Pro.local From c89994e4d7dee99126ef14cdf6347fd303130ee7 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 31 Jul 2023 14:24:04 -0400 Subject: [PATCH 177/362] Feat/update argo events config (#2300) * Matching workflow node tags to our current setup * Fixed a typo in the previous commit --- kube/services/argo-events/workflows/configmap.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index d9ad3d413..9846ba8b6 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -54,8 +54,9 @@ data: Environment: vhdcperf Name: eks-vhdcperf-workflow-karpenter karpenter.sh/discovery: vhdcperf - workflow-name: $WORKFLOW_NAME - gen3-username: $GEN3_USERNAME + workflowname: $WORKFLOW_NAME + gen3username: $GEN3_USERNAME + gen3service: argo-workflows metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled From 02ab503ee6fcaae30f20c67a7b7d27c6e9aa22e3 Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Mon, 31 Jul 2023 16:20:09 -0500 Subject: [PATCH 178/362] HP-1083 Feat/cedar mds update (#2268) 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * updating cedar ingest for new MDS format for HEAL * fix for upper level metadata * change where appl_id comes from in cedar ingestion * testing alt appl_id * fix appl id again * update appl_id check * add debug statement for cedar ingestion * remove debug statements and finalize * updateing cedar records in ingestion job * test (#2281) * test * update * test * test * update * test * fix * fix * test * fix * update * update * final * update * test * revert test * fix * test * debug * revert debug * fix: dups * Created a setup script to install argo events, as well as any resourc… (#2287) * Created a setup script to install argo events, as well as any resources used by it. For now, all the resources are for Argo Workflows * Moving from using the public eventbus demo to a local copy, so that updates to the documentation don't break our setup * back to master --------- Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Co-authored-by: Mingfei Shao Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- .../healdata/heal-cedar-data-ingest.py | 106 +++++++++++++----- 1 file changed, 77 insertions(+), 29 deletions(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 1235c6f58..c4d68199a 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,15 +1,17 @@ import argparse +import json import sys import requests import pydash +from uuid import UUID # Defines how a field in metadata is going to be mapped into a key in filters FILTER_FIELD_MAPPINGS = { - "Study Type.study_stage": "Study Type", - "Data.data_type": "Data Type", - "Study Type.study_subject_type": "Subject Type", - "Human Subject Applicability.gender_applicability": "Gender", - "Human Subject Applicability.age_applicability": "Age" + 
"study_metadata.study_type.study_stage": "Study Type", + "study_metadata.data.data_type": "Data Type", + "study_metadata.study_type.study_subject_type": "Subject Type", + "study_metadata.human_subject_applicability.gender_applicability": "Gender", + "study_metadata.human_subject_applicability.age_applicability": "Age" } # Defines how to handle special cases for values in filters @@ -31,9 +33,30 @@ # Defines field that we don't want to include in the filters OMITTED_VALUES_MAPPING = { - "Human Subject Applicability.gender_applicability": "Not applicable" + "study_metadata.human_subject_applicability.gender_applicability": "Not applicable" } +def is_valid_uuid(uuid_to_test, version=4): + """ + Check if uuid_to_test is a valid UUID. + + Parameters + ---------- + uuid_to_test : str + version : {1, 2, 3, 4} + + Returns + ------- + `True` if uuid_to_test is a valid UUID, otherwise `False`. + + """ + + try: + uuid_obj = UUID(uuid_to_test, version=version) + except ValueError: + return False + return str(uuid_obj) == uuid_to_test + def update_filter_metadata(metadata_to_update): filter_metadata = [] for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items(): @@ -82,9 +105,13 @@ def update_filter_metadata(metadata_to_update): limit = 10 offset = 0 -# initalize this to be bigger than our inital call so we can go through while loop +# initialize this to be bigger than our initial call so we can go through while loop total = 100 +if not is_valid_uuid(dir_id): + print("Directory ID is not in UUID format!") + sys.exit(1) + while((limit + offset <= total)): # Get the metadata from cedar to register print("Querying CEDAR...") @@ -101,60 +128,81 @@ def update_filter_metadata(metadata_to_update): returned_records = len(metadata_return["metadata"]["records"]) print(f"Successfully got {returned_records} record(s) from CEDAR directory") for cedar_record in metadata_return["metadata"]["records"]: - if "appl_id" not in cedar_record: + # get the appl id from cedar for 
querying in our MDS + cedar_appl_id = pydash.get(cedar_record, "metadata_location.nih_application_id") + if cedar_appl_id is None: print("This record doesn't have appl_id, skipping...") continue - # get the appl id from cedar for querying in our MDS - cedar_appl_id = str(cedar_record["appl_id"]) - # Get the metadata record for the nih_application_id - mds = requests.get(f"http://revproxy-service/mds/metadata?gen3_discovery.appl_id={cedar_appl_id}&data=true") + mds = requests.get(f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.nih_application_id={cedar_appl_id}&data=true") if mds.status_code == 200: mds_res = mds.json() # the query result key is the record of the metadata. If it doesn't return anything then our query failed. if len(list(mds_res.keys())) == 0 or len(list(mds_res.keys())) > 1: - print("Query returned nothing for ", cedar_appl_id, "appl id") + print("Query returned nothing for", cedar_appl_id, "appl id") continue # get the key for our mds record - cedar_record_id = list(mds_res.keys())[0] + mds_record_guid = list(mds_res.keys())[0] - mds_res = mds_res[cedar_record_id] - mds_cedar_register_data_body = {} + mds_res = mds_res[mds_record_guid] + mds_cedar_register_data_body = {**mds_res} mds_discovery_data_body = {} + mds_clinical_trials = {} if mds_res["_guid_type"] == "discovery_metadata": print("Metadata is already registered. Updating MDS record") elif mds_res["_guid_type"] == "unregistered_discovery_metadata": - print("Metadata is has not been registered. Registering it in MDS record") - continue + print("Metadata has not been registered. 
Registering it in MDS record") + + if "clinicaltrials_gov" in cedar_record: + mds_clinical_trials = cedar_record["clinicaltrials_gov"] + del cedar_record["clinicaltrials_gov"] + + # some special handing for this field, because its parent will be deleted before we merging the CEDAR and MDS SLMD to avoid duplicated values + cedar_record_other_study_websites = cedar_record.get("metadata_location", {}).get("other_study_websites", []) + del cedar_record["metadata_location"] + + mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) + mds_res["gen3_discovery"]["study_metadata"]["metadata_location"]["other_study_websites"] = cedar_record_other_study_websites + + # merge data from cedar that is not study level metadata into a level higher + deleted_keys = [] + for key, value in mds_res["gen3_discovery"]["study_metadata"].items(): + if not isinstance(value, dict): + mds_res["gen3_discovery"][key] = value + deleted_keys.append(key) + for key in deleted_keys: + del mds_res["gen3_discovery"]["study_metadata"][key] + + mds_discovery_data_body = update_filter_metadata(mds_res["gen3_discovery"]) - pydash.merge(mds_discovery_data_body, mds_res["gen3_discovery"], cedar_record) - mds_discovery_data_body = update_filter_metadata(mds_discovery_data_body) mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body + if mds_clinical_trials: + mds_cedar_register_data_body["clinicaltrials_gov"] = {**mds_cedar_register_data_body.get("clinicaltrials_gov", {}), **mds_clinical_trials} + mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" - print("Metadata is now being registered.") - mds_put = requests.put(f"http://revproxy-service/mds/metadata/{cedar_record_id}", + print(f"Metadata {mds_record_guid} is now being registered.") + mds_put = requests.put(f"http://revproxy-service/mds/metadata/{mds_record_guid}", headers=token_header, json = mds_cedar_register_data_body ) if mds_put.status_code == 200: - print(f"Successfully registered: {cedar_record_id}") + 
print(f"Successfully registered: {mds_record_guid}") else: - print(f"Failed to register: {cedar_record_id}. Might not be MDS admin") + print(f"Failed to register: {mds_record_guid}. Might not be MDS admin") print(f"Status from MDS: {mds_put.status_code}") else: print(f"Failed to get information from MDS: {mds.status_code}") + + else: + print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}") if offset + limit == total: break offset = offset + limit if (offset + limit) > total: - limit = (offset + limit) - total - - -else: - print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}") + limit = total - offset From c4d01f0d31235b0d37d7d039b50607270631e522 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 3 Aug 2023 12:34:06 -0500 Subject: [PATCH 179/362] Changes to OHDSI Atlas deployment to use upstream Docker images (#2301) * feat(ohdsi): init container for a config-local.js file * feat(ohdsi): different mount-point --- kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml index bf128920e..62265503e 100644 --- a/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml +++ b/kube/services/ohdsi-atlas/ohdsi-atlas-deploy.yaml @@ -72,7 +72,7 @@ spec: volumeMounts: - name: ohdsi-atlas-config-local readOnly: true - mountPath: /usr/share/nginx/html/atlas/js/config-local.js + mountPath: /etc/atlas/config-local.js subPath: config-local.js imagePullPolicy: Always resources: @@ -80,4 +80,4 @@ spec: cpu: 100m memory: 100Mi limits: - memory: 500Mi + memory: 500Mi From 92f8f3e207c062256c698e30374be629ee4a614a Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 7 Aug 2023 12:08:18 -0400 Subject: [PATCH 180/362] Covering a few extra cases for preventing clickjacking attacks. 
(#2309) --- .../gen3ff-as-root/frontend-framework-service.conf | 4 ++++ .../gen3.nginx.conf/gen3ff-as-root/portal-service.conf | 3 +++ .../portal-as-root/frontend-framework-service.conf | 4 ++++ 3 files changed, 11 insertions(+) diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf index ac2cb75f6..37e7623de 100644 --- a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/frontend-framework-service.conf @@ -2,6 +2,10 @@ if ($csrf_check !~ ^ok-\S.+$) { return 403 "failed csrf check"; } + + # added to avoid click-jacking attacks + add_header X-Frame-Options "SAMEORIGIN"; + set $proxy_service "frontend-framework"; set $upstream http://frontend-framework-service.$namespace.svc.cluster.local; proxy_pass $upstream; diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf index 58f0851d6..75d69c185 100644 --- a/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/gen3ff-as-root/portal-service.conf @@ -21,5 +21,8 @@ rewrite ^/(.*)$ /dashboard/Public/maintenance-page/index.html redirect; } + # added to avoid click-jacking attacks + add_header X-Frame-Options "SAMEORIGIN"; + proxy_pass $upstream; } diff --git a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf index dbb24e4b2..f3686d1a6 100644 --- a/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/portal-as-root/frontend-framework-service.conf @@ -6,6 +6,10 @@ if ($csrf_check !~ ^ok-\S.+$) { return 403 "failed csrf check"; } + + # 
added to avoid click-jacking attacks + add_header X-Frame-Options "SAMEORIGIN"; + set $proxy_service "frontend-framework"; # frontend framework service expects the /ff/ prefix, so no path rewrite set $upstream http://frontend-framework-service.$namespace.svc.cluster.local; From 62d2d8908da57f725894c16b19a45787e924f67c Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 8 Aug 2023 22:13:03 -0500 Subject: [PATCH 181/362] Update kube-roll-all.sh (#2302) * Update kube-roll-all.sh * feat: do not deploy cohort-middleware if Atlas is not deployed --------- Co-authored-by: Hara Prasad --- gen3/bin/kube-roll-all.sh | 6 ++++++ gen3/bin/kube-setup-cohort-middleware.sh | 14 +++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 552c27708..0cf7df2c3 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -243,6 +243,12 @@ else gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" fi +if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then + gen3 kube-setup-cohort-middleware +else + gen3_log_info "not deploying cohort-middleware - no manifest entry for .versions[\"cohort-middleware\"]" +fi + gen3 kube-setup-revproxy if [[ "$GEN3_ROLL_FAST" != "true" ]]; then diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh index 91b414849..76096469c 100644 --- a/gen3/bin/kube-setup-cohort-middleware.sh +++ b/gen3/bin/kube-setup-cohort-middleware.sh @@ -46,17 +46,17 @@ EOM fi gen3 secrets sync "initialize cohort-middleware/development.yaml" - - # envsubst <"${GEN3_HOME}/kube/services/cohort-middleware/development.yaml" | g3kubectl create secret generic cohort-middleware-config --from-file=development.yaml=/dev/stdin ) } # main -------------------------------------- -setup_secrets - -gen3 roll cohort-middleware -g3kubectl apply -f 
"${GEN3_HOME}/kube/services/cohort-middleware/cohort-middleware-service.yaml" -cat < Date: Thu, 10 Aug 2023 14:34:09 -0400 Subject: [PATCH 182/362] install go binary in jenkins CI worker (#2304) * install go binary in jenkins CI worker * install go * download go tar --------- Co-authored-by: Hara Prasad --- .secrets.baseline | 4 ++-- Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 8e671afaa..527ffcc30 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-07-31T16:54:24Z", + "generated_at": "2023-08-10T17:51:06Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -78,7 +78,7 @@ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_verified": false, - "line_number": 115, + "line_number": 122, "type": "Secret Keyword" } ], diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 671cd2e02..40fd08fa3 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -54,6 +54,13 @@ RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" \ google-cloud-sdk-cbt \ kubectl +# install go - https://go.dev/doc/install +RUN wget https://go.dev/dl/go1.21.0.linux-amd64.tar.gz \ + && rm -rf /usr/local/go \ + && tar -C /usr/local -xzf go1.21.0.linux-amd64.tar.gz +ENV PATH="$PATH:/usr/local/go/bin" +RUN go version + # # install docker tools: # From bc3cb2c0552af0d1ed84a02275df3cb1a424c548 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 11 Aug 2023 10:08:36 -0500 Subject: [PATCH 183/362] update squid web whitelist (#2317) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index c36194765..fef94f059 100644 --- a/files/squid_whitelist/web_whitelist +++ 
b/files/squid_whitelist/web_whitelist @@ -123,6 +123,7 @@ orcid.org pgp.mit.edu ppa.launchpad.net prometheus-community.github.io +proxy.golang.org public.ecr.aws pubmirrors.dal.corespace.com reflector.westga.edu From b7868abbc30b17a5e0c6f134e455549d6e511097 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 14 Aug 2023 12:02:10 -0500 Subject: [PATCH 184/362] chore(wts-script): Updated kube-setup script to include new field (#2318) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-wts.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/gen3/bin/kube-setup-wts.sh b/gen3/bin/kube-setup-wts.sh index b807da2d5..ad8211d03 100644 --- a/gen3/bin/kube-setup-wts.sh +++ b/gen3/bin/kube-setup-wts.sh @@ -42,6 +42,8 @@ new_client() { "oidc_client_id": "$client_id", "oidc_client_secret": "$client_secret", + "aggregate_endpoint_allowlist": ["/authz/mapping"], + "external_oidc": [] } EOM From bfdd1c218e0f9be254e3ca0769550bc2cddd77aa Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 15 Aug 2023 16:18:02 -0500 Subject: [PATCH 185/362] Update values.yaml (#2320) --- kube/services/argo/values.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index e8db62711..c603cf1b2 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -5,6 +5,12 @@ controller: enabled: true servicePort: 9090 + resources: + requests: + memory: 8Gi + limits: + memory: 8Gi + podAnnotations: prometheus.io/scrape: "true" prometheus.io/path: /metrics @@ -76,6 +82,11 @@ server: extraEnv: - name: ARGO_HTTP1 value: "true" + resources: + requests: + memory: 8Gi + limits: + memory: 8Gi # -- Influences the creation of the ConfigMap for the workflow-controller itself. 
useDefaultArtifactRepo: true From ece60d4b40700fd03b432ffffe625ffb3f264908 Mon Sep 17 00:00:00 2001 From: vzpgb <45467497+vzpgb@users.noreply.github.com> Date: Thu, 17 Aug 2023 09:38:05 -0500 Subject: [PATCH 186/362] Increase argo worflow parallelism to 5 (#2321) --- kube/services/argo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index c603cf1b2..67fa05a09 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,5 +1,5 @@ controller: - parallelism: 3 + parallelism: 5 metricsConfig: # -- Enables prometheus metrics server enabled: true From 1cba18ce0e2fbb08f1132422d1e78ea1bcaf8459 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Thu, 17 Aug 2023 09:40:26 -0500 Subject: [PATCH 187/362] Update configmap.yaml (#2316) Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- kube/services/argo-events/workflows/configmap.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index 9846ba8b6..c88ce9fd0 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -75,6 +75,8 @@ data: aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + # set registryPullQPS + echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 sudo yum update -y From 489f7a55996ab172057230647c5d8661363144f6 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 17 Aug 2023 12:17:41 -0600 Subject: [PATCH 188/362] 
removing the ambassador elb yalm file and removing elb from the kube-setup-ambassdor script (#2322) --- gen3/bin/kube-setup-ambassador.sh | 1 - .../ambassador-gen3-service-elb.yaml | 22 ------------------- 2 files changed, 23 deletions(-) delete mode 100644 kube/services/ambassador-gen3/ambassador-gen3-service-elb.yaml diff --git a/gen3/bin/kube-setup-ambassador.sh b/gen3/bin/kube-setup-ambassador.sh index 0f4e0be28..5f92af5cc 100644 --- a/gen3/bin/kube-setup-ambassador.sh +++ b/gen3/bin/kube-setup-ambassador.sh @@ -25,7 +25,6 @@ deploy_api_gateway() { return 0 fi gen3 roll ambassador-gen3 - g3k_kv_filter "${GEN3_HOME}/kube/services/ambassador-gen3/ambassador-gen3-service-elb.yaml" GEN3_ARN "$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}')" | g3kubectl apply -f - local luaYamlTemp="$(mktemp "$XDG_RUNTIME_DIR/lua.yaml.XXXXXX")" cat - > "$luaYamlTemp" < Date: Fri, 18 Aug 2023 13:43:44 -0500 Subject: [PATCH 189/362] Revert kubelet config change (#2323) --- kube/services/argo-events/workflows/configmap.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index c88ce9fd0..9846ba8b6 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -75,8 +75,6 @@ data: aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys - # set registryPullQPS - echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 sudo yum update -y From 4eb49e23312eb30438b08d8a8c226d0559f0ccfa Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 23 Aug 2023 08:36:28 -0700 Subject: [PATCH 190/362] Add k6 to jenkins-worker 
(#2325) --- Docker/jenkins/Jenkins-Worker/Dockerfile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index 088186b04..c31e54923 100644 --- a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -53,6 +53,12 @@ RUN sudo install -m 0755 -d /etc/apt/keyrings \ sudo tee /etc/apt/sources.list.d/docker.list > /dev/null \ && apt-get update && apt-get install -y docker-ce +# install k6 to run load tests +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 \ + && echo "deb https://dl.k6.io/deb stable main" | tee /etc/apt/sources.list.d/k6.list \ + && apt-get update \ + && apt-get install k6 + # install xk6-browser RUN cd /opt && wget --quiet https://github.com/grafana/xk6-browser/releases/download/v0.3.0/xk6-browser-v0.3.0-linux-amd64.tar.gz \ && tar -xvzf /opt/xk6-browser-v0.3.0-linux-amd64.tar.gz From 14aedad61f4526d21e5e737f33379c2cff92a810 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 23 Aug 2023 12:49:31 -0600 Subject: [PATCH 191/362] adding external secrets to squid conf (#2326) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index fef94f059..545b0b97c 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -32,6 +32,7 @@ centos.mirrors.wvstateu.edu cernvm.cern.ch charts.bitnami.com charts.helm.sh +charts.external-secrets.io cloud.r-project.org coreos.com covidstoplight.org From 2122ee31e6b94126d3d3b7a6308d0a90955d5001 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 23 Aug 2023 14:13:54 -0600 Subject: [PATCH 192/362] Feat/external secrets (#2327) * adding external secrets to squid conf * adding external secrets to 
wildcard --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index a8c765814..6b469bf78 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -38,6 +38,7 @@ .dph.illinois.gov .elasticsearch.org .erlang-solutions.com +.external-secrets.io .extjs.com .fedoraproject.org .gen3.org From 06fe5970b6c8b755edbc598ac4d497b6fc300655 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 23 Aug 2023 14:23:55 -0600 Subject: [PATCH 193/362] Update web_whitelist (#2329) removing from web_whitelist --- files/squid_whitelist/web_whitelist | 1 - 1 file changed, 1 deletion(-) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 545b0b97c..fef94f059 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -32,7 +32,6 @@ centos.mirrors.wvstateu.edu cernvm.cern.ch charts.bitnami.com charts.helm.sh -charts.external-secrets.io cloud.r-project.org coreos.com covidstoplight.org From 01541d3724c9506c91bf5ad6c26584f8c201fdda Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 23 Aug 2023 14:27:03 -0600 Subject: [PATCH 194/362] Update web_whitelist (#2330) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index fef94f059..537901928 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -69,6 +69,7 @@ ftp.ussg.iu.edu fmwww.bc.edu gcr.io get.helm.sh +ghcr.io git.io go.googlesource.com golang.org From cc63d3557504d2d9e8094f277c372120edfea510 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 23 Aug 2023 14:33:00 -0600 
Subject: [PATCH 195/362] Update web_whitelist (#2331) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 537901928..32282e99b 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -10,6 +10,7 @@ api.monqcle.com biodata-integration-tests.net marketing.biorender.com clinicaltrials.gov +charts.bitnami.com ctds-planx.atlassian.net data.cityofchicago.org dataguids.org From c45853f52dd2931f7011e8e7698c9fc19b157eb2 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 23 Aug 2023 15:35:24 -0500 Subject: [PATCH 196/362] Update web_whitelist (#2332) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 32282e99b..349d1e022 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -141,6 +141,7 @@ repo.dimenoc.com repos.mia.quadranet.com repos.redrockhost.com repos.sensuapp.org +repo.vmware.com repository.cloudera.com resource.metadatacenter.org rules.emergingthreats.net From 6bbde2a29388214fd15eb90323509a3df1112207 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Thu, 24 Aug 2023 15:03:37 -0500 Subject: [PATCH 197/362] fix(cohort-deploy-logic): Updated kube-roll-all to not deploy cohort middleware twice and swapped order to fix dependency issues (#2333) Co-authored-by: Edward Malinowski --- gen3/bin/kube-roll-all.sh | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 0cf7df2c3..c9cec5a25 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -243,6 +243,12 @@ else gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" fi +if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && 
g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then + gen3 kube-setup-ohdsi & +else + gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'" +fi + if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then gen3 kube-setup-cohort-middleware else @@ -340,18 +346,6 @@ else gen3_log_info "not deploying argo-wrapper - no manifest entry for '.versions[\"argo-wrapper\"]'" fi -if g3k_manifest_lookup '.versions["cohort-middleware"]' 2> /dev/null; then - gen3 roll cohort-middleware & -else - gen3_log_info "not deploying cohort-middleware - no manifest entry for '.versions[\"cohort-middleware\"]'" -fi - -if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> /dev/null; then - gen3 kube-setup-ohdsi & -else - gen3_log_info "not deploying OHDSI tools - no manifest entry for '.versions[\"ohdsi-atlas\"]' and '.versions[\"ohdsi-webapi\"]'" -fi - gen3_log_info "enable network policy" gen3 kube-setup-networkpolicy "enable" || true & From 576ebebecd7be01a6613d42781787e87da9ebc03 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 28 Aug 2023 11:28:50 -0600 Subject: [PATCH 198/362] adding a toleration to prometheus (#2335) * adding a toleration so prometheus pods don't get scheduled on fargate pods * testing with node affinity instead * removing commented lines --- kube/services/monitoring/values.yaml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/kube/services/monitoring/values.yaml b/kube/services/monitoring/values.yaml index ffdf92bd9..d93e5098a 100644 --- a/kube/services/monitoring/values.yaml +++ b/kube/services/monitoring/values.yaml @@ -1540,6 +1540,15 @@ prometheus-node-exporter: - 
--collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ service: portName: http-metrics + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "eks.amazonaws.com/compute-type" + operator: NotIn + values: + - fargate prometheus: monitor: enabled: true From 16f034ac895417793379a1c693dffb0b988bf1fa Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Mon, 28 Aug 2023 16:09:44 -0500 Subject: [PATCH 199/362] Feat/dbbackup auto (#2334) * Add automated DB backup and restore utilities * Add automated DB backup and restore utilities --- gen3/bin/dbbackup.sh | 201 ++++++++++++++++++ kube/services/jobs/psql-db-prep-dump-job.yaml | 79 +++++++ .../jobs/psql-db-prep-restore-job.yaml | 90 ++++++++ 3 files changed, 370 insertions(+) create mode 100644 gen3/bin/dbbackup.sh create mode 100644 kube/services/jobs/psql-db-prep-dump-job.yaml create mode 100644 kube/services/jobs/psql-db-prep-restore-job.yaml diff --git a/gen3/bin/dbbackup.sh b/gen3/bin/dbbackup.sh new file mode 100644 index 000000000..29f267221 --- /dev/null +++ b/gen3/bin/dbbackup.sh @@ -0,0 +1,201 @@ +#!/bin/bash + +#################################################################################################### +# Script: dbdump.sh +# +# Description: +# This script facilitates the management of database backups within the gen3 environment. It is +# equipped to establish policies, service accounts, roles, and S3 buckets. Depending on the +# command provided, it will either initiate a database dump or perform a restore. +# +# Usage: +# gen3 dbbackup [dump|restore] +# +# dump - Initiates a database dump, creating the essential AWS resources if they are absent. +# The dump operation is intended to be executed from the namespace/commons that requires +# the backup. 
+# restore - Initiates a database restore, creating the essential AWS resources if they are absent. +# The restore operation is meant to be executed in the target namespace, where the backup +# needs to be restored. +# +# Notes: +# This script extensively utilizes the AWS CLI and the gen3 CLI. Proper functioning demands a +# configured gen3 environment and the availability of the necessary CLI tools. +# +#################################################################################################### + +# Exit on error +#set -e + +# Print commands before executing +#set -x + +#trap 'echo "Error at Line $LINENO"' ERR + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +policy_name="bucket_reader_writer_gen3_db_backup" +account_id=$(aws sts get-caller-identity --query "Account" --output text) +vpc_name="$(gen3 api environment)" +namespace="$(gen3 db namespace)" +sa_name="dbbackup-sa" +bucket_name="gen3-db-backups-${account_id}" + +gen3_log_info "policy_name: $policy_name" +gen3_log_info "account_id: $account_id" +gen3_log_info "vpc_name: $vpc_name" +gen3_log_info "namespace: $namespace" +gen3_log_info "sa_name: $sa_name" +gen3_log_info "bucket_name: $bucket_name" + + +# Create an S3 access policy if it doesn't exist +create_policy() { + # Check if policy exists + if ! 
aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep -q "arn:aws:iam"; then + # Create the S3 access policy - policy document + access_policy=$(cat <<-EOM +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:CreateBucket", + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::gen3-db-backups-*" + ] + } + ] +} +EOM + ) + + # Create the S3 access policy from the policy document + policy_arn=$(aws iam create-policy --policy-name "$policy_name" --policy-document "$access_policy" --query "Policy.Arn" --output text) + gen3_log_info "policy_arn: $policy_arn" + else + gen3_log_info "Policy $policy_name already exists, skipping policy creation." + policy_arn=$(aws iam list-policies --query "Policies[?PolicyName == '$policy_name'] | [0].Arn" --output text | grep "arn:aws:iam" | head -n 1) + gen3_log_info "policy_arn: $policy_arn" + fi +} + + +# Create or update the Service Account and its corresponding IAM Role +create_service_account_and_role() { + cluster_arn=$(kubectl config current-context) + eks_cluster=$(echo "$cluster_arn" | awk -F'/' '{print $2}') + oidc_url=$(aws eks describe-cluster --name $eks_cluster --query 'cluster.identity.oidc.issuer' --output text | sed -e 's/^https:\/\///') + role_name="${vpc_name}-${namespace}-${sa_name}-role" + role_arn="arn:aws:iam::${account_id}:role/${role_name}" + local trust_policy=$(mktemp -p "$XDG_RUNTIME_DIR" "tmp_policy.XXXXXX") + gen3_log_info "trust_policy: $trust_policy" + gen3_log_info "eks_cluster: $eks_cluster" + gen3_log_info "oidc_url: $oidc_url" + gen3_log_info "role_name: $role_name" + + + cat > ${trust_policy} <&1; then + gen3_log_info "Updating existing role: $role_name" + aws iam update-assume-role-policy --role-name $role_name --policy-document "file://$trust_policy" + else + gen3_log_info "Creating new role: $role_name" + aws iam create-role --role-name $role_name 
--assume-role-policy-document "file://$trust_policy" + fi + + # Attach the policy to the IAM role + aws iam attach-role-policy --role-name $role_name --policy-arn $policy_arn + + # Create the Kubernetes service account if it doesn't exist + if ! kubectl get serviceaccount -n $namespace $sa_name 2>&1; then + kubectl create serviceaccount -n $namespace $sa_name + fi + # Annotate the KSA with the IAM role ARN + gen3_log_info "Annotating Service Account with IAM role ARN" + kubectl annotate serviceaccount -n ${namespace} ${sa_name} eks.amazonaws.com/role-arn=${role_arn} --overwrite + +} + +# Create an S3 bucket if it doesn't exist +create_s3_bucket() { + # Check if bucket already exists + if aws s3 ls "s3://$bucket_name" 2>&1 | grep -q 'NoSuchBucket'; then + gen3_log_info "Bucket does not exist, creating..." + aws s3 mb "s3://$bucket_name" + else + gen3_log_info "Bucket $bucket_name already exists, skipping bucket creation." + fi +} + + +# Function to trigger the database backup job +db_dump() { + gen3 job run psql-db-prep-dump +} + + +# Function to trigger the database backup restore job +db_restore() { + gen3 job run psql-db-prep-restore +} + + +# main function to determine whether dump or restore +main() { + case "$1" in + dump) + gen3_log_info "Triggering database dump..." + create_policy + create_service_account_and_role + create_s3_bucket + db_dump + ;; + restore) + gen3_log_info "Triggering database restore..." + create_policy + create_service_account_and_role + create_s3_bucket + db_restore + ;; + *) + echo "Invalid command. 
Usage: gen3 dbbackup [dump|restore]" + return 1 + ;; + esac +} + +main "$1" diff --git a/kube/services/jobs/psql-db-prep-dump-job.yaml b/kube/services/jobs/psql-db-prep-dump-job.yaml new file mode 100644 index 000000000..86c513b78 --- /dev/null +++ b/kube/services/jobs/psql-db-prep-dump-job.yaml @@ -0,0 +1,79 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-prep-dump +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: dbbackup-sa + containers: + - name: pgdump + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + command: [ "/bin/bash" ] + args: + - "-c" + - | + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + account_id=$(aws sts get-caller-identity --query "Account" --output text) + default_bucket_name="gen3-db-backups-${account_id}" + default_databases=("indexd" "sheepdog" "metadata") + s3_dir="$(date +"%Y-%m-%d-%H-%M-%S")" + databases=("${default_databases[@]}") + bucket_name=$default_bucket_name + + for database in "${databases[@]}"; do + gen3_log_info "Starting database backup for ${database}" + gen3 db backup "${database}" > "${database}.sql" + + if [ $? -eq 0 ] && [ -f "${database}.sql" ]; then + gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql" + aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql" + + if [ $? 
-eq 0 ]; then + gen3_log_info "Successfully uploaded ${database}.sql to S3" + else + gen3_log_err "Failed to upload ${database}.sql to S3" + fi + gen3_log_info "Deleting temporary backup file ${database}.sql" + rm -f "${database}.sql" + else + gen3_log_err "Backup operation failed for ${database}" + rm -f "${database}.sql" + fi + done + sleep 600 + restartPolicy: Never + diff --git a/kube/services/jobs/psql-db-prep-restore-job.yaml b/kube/services/jobs/psql-db-prep-restore-job.yaml new file mode 100644 index 000000000..710e6f4f1 --- /dev/null +++ b/kube/services/jobs/psql-db-prep-restore-job.yaml @@ -0,0 +1,90 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-prep-restore +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: dbbackup-sa + containers: + - name: pgrestore + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + command: [ "/bin/bash" ] + args: + - "-c" + - | + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + account_id=$(aws sts get-caller-identity --query "Account" --output text) + default_bucket_name="gen3-db-backups-${account_id}" + default_databases=("indexd" "sheepdog" "metadata") + backup_directories=$(aws s3 ls "s3://${default_bucket_name}/") + newest_directory=$(echo "$backup_directories" | awk '/PRE/ {if ($2 > max) max = $2} END {print max}') + databases=("${default_databases[@]}") + bucket_name=$default_bucket_name + namespace=$(cat 
/var/run/secrets/kubernetes.io/serviceaccount/namespace) + date_str=$(date -u +%y%m%d_%H%M%S) + gen3_log_info "Database backup location in S3: ${bucket_name}/${newest_directory}" + gen3_log_info "namespace: $namespace \n\n" + + for database in "${databases[@]}"; do + gen3_log_info "Downloading database backup file s3://${default_bucket_name}/${newest_directory}${database}.sql" + aws s3 cp "s3://${default_bucket_name}/${newest_directory}${database}.sql" "${database}.sql" + server=$(gen3 db creds "$database" | jq -r '.g3FarmServer') + username=$(gen3 db creds "$database" | jq -r '.db_username') + db_name="${namespace}_${database}_${date_str}" + if [[ -z "$server" || -z "$username" ]]; then + gen3_log_info "Error: Unable to extract server name or username." + return 1 + fi + gen3 psql $database -c "create database $db_name;" 2>&1 | grep -q "permission denied" + if [ $? -eq 0 ]; then + gen3_log_info "User does not have permission to create database. Granting required permission..." + gen3 psql $server -c "alter user $username createdb;" + gen3 psql $database -c "create database $db_name;" + if [ $? -eq 0 ]; then + gen3_log_info "Database $db_name created successfully!" + else + gen3_log_info "Error creating database $db_name after granting permission." + fi + else + gen3_log_info "Database $db_name created successfully!" + fi + gen3_log_info "Starting database restore for ${database} to database $db_name" + gen3 psql "$database" -d "$db_name" -f "${database}.sql" 1>&2 + gen3_log_info "cleanup temporary backup file ${database}.sql \n\n\n" + done + sleep 600 + restartPolicy: Never From 7c631142682c32d8d538a1aff1f68912e99c44c6 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Tue, 29 Aug 2023 16:01:19 -0400 Subject: [PATCH 200/362] Feat/argo events fixes (#2336) * Fixed a few glitches/errors with kube-setup-argo-events * Removed the hardcoded environment from the awsnodetemplate template. 
* Added code to copy the environment from the global configmap, and use that new configmap when creating new resources. * Removing the afterStart setting and going with the default * Missed the removal on the last commit * Maybe it does need to be explicitly set --- gen3/bin/kube-setup-argo-events.sh | 16 ++++++++++++-- kube/services/argo-events/eventbus.yaml | 3 ++- .../argo-events/workflows/configmap.yaml | 10 ++++----- .../workflows/eventsource-created.yaml | 2 +- .../argo-events/workflows/sensor-created.yaml | 22 +++++++++++-------- 5 files changed, 35 insertions(+), 18 deletions(-) diff --git a/gen3/bin/kube-setup-argo-events.sh b/gen3/bin/kube-setup-argo-events.sh index b37c7c010..3537202f4 100644 --- a/gen3/bin/kube-setup-argo-events.sh +++ b/gen3/bin/kube-setup-argo-events.sh @@ -31,11 +31,22 @@ if ! kubectl get namespace argo-events > /dev/null 2>&1; then kubectl create namespace argo-events fi +# Check if target configmap exists +if ! kubectl get configmap environment -n argo-events > /dev/null 2>&1; then + + # Get value from source configmap + VALUE=$(kubectl get configmap global -n default -o jsonpath="{.data.environment}") + + # Create target configmap + kubectl create configmap environment -n argo-events --from-literal=environment=$VALUE + +fi + if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then if (! helm status argo -n argo-events > /dev/null 2>&1 ) || [[ "$force" == true ]]; then helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-events -n argo-events --version "2.1.3" + helm upgrade --install argo-events argo/argo-events -n argo-events --version "2.1.3" else gen3_log_info "argo-events Helm chart already installed. 
To force reinstall, run with --force" fi @@ -46,7 +57,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_na kubectl apply -f ${GEN3_HOME}/kube/services/argo-events/eventbus.yaml fi else - gen3_log_info "Not running in default namespace, will not install argo-events helm chart" + gen3_log_info "Not running in default namespace, will not install argo-events helm chart. This behavior can be overwritten with the --override-namespace flag" fi if [[ "$create_workflow_resources" == true ]]; then @@ -57,4 +68,5 @@ if [[ "$create_workflow_resources" == true ]]; then #Creating rolebindings to allow Argo Events to create jobs, and allow those jobs to manage Karpenter resources kubectl create rolebinding argo-events-job-admin-binding --role=job-admin --serviceaccount=argo-events:default --namespace=argo-events kubectl create clusterrolebinding karpenter-admin-binding --clusterrole=karpenter-admin --serviceaccount=argo-events:default + kubectl create clusterrolebinding argo-workflows-view-binding --clusterrole=argo-argo-workflows-view --serviceaccount=argo-events:default fi \ No newline at end of file diff --git a/kube/services/argo-events/eventbus.yaml b/kube/services/argo-events/eventbus.yaml index a53e3bd9c..00d5cf4d7 100644 --- a/kube/services/argo-events/eventbus.yaml +++ b/kube/services/argo-events/eventbus.yaml @@ -2,10 +2,11 @@ apiVersion: argoproj.io/v1alpha1 kind: EventBus metadata: name: default + namespace: argo-events spec: nats: native: # Optional, defaults to 3. If it is < 3, set it to 3, that is the minimal requirement. 
replicas: 3 # Optional, authen strategy, "none" or "token", defaults to "none" - auth: token \ No newline at end of file + auth: token diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index 9846ba8b6..a10e01b0d 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -47,13 +47,13 @@ data: name: workflow-$WORKFLOW_NAME spec: subnetSelector: - karpenter.sh/discovery: vhdcperf + karpenter.sh/discovery: $ENVIRONMENT securityGroupSelector: - karpenter.sh/discovery: vhdcperf-workflow + karpenter.sh/discovery: $ENVIRONMENT-workflow tags: - Environment: vhdcperf - Name: eks-vhdcperf-workflow-karpenter - karpenter.sh/discovery: vhdcperf + Environment: $ENVIRONMENT + Name: eks-$ENVIRONMENT-workflow-karpenter + karpenter.sh/discovery: $ENVIRONMENT workflowname: $WORKFLOW_NAME gen3username: $GEN3_USERNAME gen3service: argo-workflows diff --git a/kube/services/argo-events/workflows/eventsource-created.yaml b/kube/services/argo-events/workflows/eventsource-created.yaml index 9abf78e19..11d7084ca 100644 --- a/kube/services/argo-events/workflows/eventsource-created.yaml +++ b/kube/services/argo-events/workflows/eventsource-created.yaml @@ -15,4 +15,4 @@ spec: eventTypes: - ADD filter: - afterStart: false + afterStart: true diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index 27cbc5643..7f06045bf 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -56,15 +56,20 @@ spec: - name: karpenter-resource-creator image: quay.io/cdis/awshelper command: ["/bin/sh"] - args: - - "-c" - - | - for file in /home/manifests/*.yaml; do envsubst < $file | kubectl apply -f -; done + args: + - "-c" + - | + for file in /home/manifests/*.yaml; do envsubst < $file | kubectl apply -f -; done env: - - name: 
WORKFLOW_NAME - value: "" - - name: GEN3_USERNAME - value: "" + - name: WORKFLOW_NAME + value: "" + - name: GEN3_USERNAME + value: "" + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: environment + key: environment volumeMounts: - name: karpenter-templates-volume mountPath: /home/manifests @@ -73,4 +78,3 @@ spec: configMap: name: karpenter-templates backoffLimit: 0 - From 9af1c73b6334b98d4a6f0fc9cec0f3a698404597 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 30 Aug 2023 10:17:55 -0400 Subject: [PATCH 201/362] Karpenter userdata cleanup (#2338) * Removed the -xe flag, and unauthorized calls to set tags. Also removed registryPullQPS limits * Adding back the -x flag, but leaving out -e for now --- kube/services/argo-events/workflows/configmap.yaml | 6 +++--- kube/services/karpenter/nodeTemplateDefault.yaml | 6 +++--- kube/services/karpenter/nodeTemplateGPU.yaml | 6 +++--- kube/services/karpenter/nodeTemplateJupyter.yaml | 6 +++--- kube/services/karpenter/nodeTemplateWorkflow.yaml | 6 +++--- 5 files changed, 15 insertions(+), 15 deletions(-) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index a10e01b0d..6b21221b5 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -69,11 +69,11 @@ data: --BOUNDARY Content-Type: text/x-shellscript; charset="us-ascii" - #!/bin/bash -xe + #!/bin/bash -x instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys - aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' - curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + echo 
"$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index 0f76a392f..20198944f 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -23,11 +23,11 @@ spec: --BOUNDARY Content-Type: text/x-shellscript; charset="us-ascii" - #!/bin/bash -xe + #!/bin/bash -x instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys - aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' - curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml index b41e6441c..a6ca7bbc8 100644 --- a/kube/services/karpenter/nodeTemplateGPU.yaml +++ b/kube/services/karpenter/nodeTemplateGPU.yaml @@ -23,11 +23,11 @@ spec: --BOUNDARY Content-Type: text/x-shellscript; charset="us-ascii" - #!/bin/bash -xe + #!/bin/bash -x instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys - aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' - curl 
https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml index 579ac1aa3..ad72d3dd6 100644 --- a/kube/services/karpenter/nodeTemplateJupyter.yaml +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -23,11 +23,11 @@ spec: --BOUNDARY Content-Type: text/x-shellscript; charset="us-ascii" - #!/bin/bash -xe + #!/bin/bash -x instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys - aws ec2 create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' - curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml index 60481b4fc..565d06f7c 100644 --- a/kube/services/karpenter/nodeTemplateWorkflow.yaml +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -23,11 +23,11 @@ spec: --BOUNDARY Content-Type: text/x-shellscript; charset="us-ascii" - #!/bin/bash -xe + #!/bin/bash -x instanceId=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r .instanceId) curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys - aws ec2 
create-tags --resources $instanceId --tags 'Key="instanceId",Value='$instanceId'' - curl https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/files/authorized_keys/ops_team >> /home/ec2-user/.ssh/authorized_keys + + echo "$(jq '.registryPullQPS=0' /etc/kubernetes/kubelet/kubelet-config.json)" > /etc/kubernetes/kubelet/kubelet-config.json sysctl -w fs.inotify.max_user_watches=12000 From 148925926add161f64c819087f2f44b94ba89e85 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 30 Aug 2023 14:29:46 -0500 Subject: [PATCH 202/362] Update web_wildcard_whitelist (#2340) --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 6b469bf78..15d6037ba 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -31,6 +31,7 @@ .data-commons.org .datadoghq.com .datastage.io +.ddog-gov.com .diseasedatahub.org .docker.com .docker.io From 10e3adad4b4a12b2d008881089a1743201b5f7e0 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 30 Aug 2023 16:55:52 -0500 Subject: [PATCH 203/362] Add values from external git repository (#2269) --- kube/services/datadog/datadog-application.yaml | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/kube/services/datadog/datadog-application.yaml b/kube/services/datadog/datadog-application.yaml index f5a8925e1..19e0e1d86 100644 --- a/kube/services/datadog/datadog-application.yaml +++ b/kube/services/datadog/datadog-application.yaml @@ -5,14 +5,17 @@ metadata: namespace: argocd spec: project: default - source: - chart: datadog + sources: + - chart: datadog repoURL: 'https://helm.datadoghq.com' targetRevision: 3.6.4 helm: - valueFiles: - - https://raw.githubusercontent.com/uc-cdis/cloud-automation/master/kube/services/datadog/values.yaml + valueFiles: + - $values/kube/services/datadog/values.yaml releaseName: datadog + - repoURL: 'https://github.com/uc-cdis/cloud-automation.git' + targetRevision: master + ref: values destination: server: 'https://kubernetes.default.svc' namespace: datadog @@ -21,4 +24,4 @@ spec: prune: true selfHeal: true syncOptions: - - CreateNamespace=true \ No newline at end of file + - CreateNamespace=true From 607ef49e304358732f018c0c2c3c5ef8bdef2647 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 31 Aug 2023 11:38:47 -0400 Subject: [PATCH 204/362] Fix/restart argo events jobs (#2342) * Adding an OnFailure restart policy to the Argo Events jobs * Also adding a request for 2Gi of memory and 2 CPU cores to karpenter * Also setting the limits as well * Adding an enormous backoff limit, to let argo-events jobs retry if karpenter is unavailable or unstable --- gen3/bin/kube-setup-karpenter.sh | 6 +++++- .../argo-events/workflows/sensor-completed.yaml | 11 +++++------ .../argo-events/workflows/sensor-created.yaml | 4 ++-- .../argo-events/workflows/sensor-deleted.yaml | 11 +++++------ 4 files changed, 17 
insertions(+), 15 deletions(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 6a8b21949..8ba8ed9d9 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -140,7 +140,11 @@ gen3_deploy_karpenter() { --set serviceAccount.name=karpenter \ --set serviceAccount.create=false \ --set controller.env[0].name=AWS_REGION \ - --set controller.env[0].value=us-east-1 + --set controller.env[0].value=us-east-1 \ + --set controller.resources.requests.memory="2Gi" \ + --set controller.resources.requests.cpu="2" \ + --set controller.resources.limits.memory="2Gi" \ + --set controller.resources.limits.cpu="2" fi gen3 awsrole sa-annotate karpenter "karpenter-controller-role-$vpc_name" karpenter gen3_log_info "Remove cluster-autoscaler" diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml index e92ad6918..5e4e5ae35 100644 --- a/kube/services/argo-events/workflows/sensor-completed.yaml +++ b/kube/services/argo-events/workflows/sensor-completed.yaml @@ -43,18 +43,17 @@ spec: parallelism: 1 template: spec: - restartPolicy: Never + restartPolicy: OnFailure containers: - name: karpenter-resource-creator image: quay.io/cdis/awshelper command: ["/bin/sh"] - args: + args: - "-c" - | kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME kubectl delete provisioners workflow-$WORKFLOW_NAME env: - - name: WORKFLOW_NAME - value: "" - backoffLimit: 0 - + - name: WORKFLOW_NAME + value: "" + backoffLimit: 20 diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index 7f06045bf..fa99f66c7 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -51,7 +51,7 @@ spec: parallelism: 1 template: spec: - restartPolicy: Never + restartPolicy: OnFailure containers: - name: karpenter-resource-creator image: 
quay.io/cdis/awshelper @@ -77,4 +77,4 @@ spec: - name: karpenter-templates-volume configMap: name: karpenter-templates - backoffLimit: 0 + backoffLimit: 20 diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml index 61e2235d7..cad6a7a70 100644 --- a/kube/services/argo-events/workflows/sensor-deleted.yaml +++ b/kube/services/argo-events/workflows/sensor-deleted.yaml @@ -39,18 +39,17 @@ spec: parallelism: 1 template: spec: - restartPolicy: Never + restartPolicy: OnFailure containers: - name: karpenter-resource-creator image: quay.io/cdis/awshelper command: ["/bin/sh"] - args: + args: - "-c" - | kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME kubectl delete provisioners workflow-$WORKFLOW_NAME env: - - name: WORKFLOW_NAME - value: "" - backoffLimit: 0 - + - name: WORKFLOW_NAME + value: "" + backoffLimit: 20 From 8ec83e3da834a724f253b95d97b02652a5eefd92 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 5 Sep 2023 09:50:24 -0500 Subject: [PATCH 205/362] chore(updated-shibboleth-config): Updated the metadata for shibboleth (#2185) Co-authored-by: Edward Malinowski Co-authored-by: Alexander VanTol --- .../fenceshib/fenceshib-configmap.yaml | 100 +++++++++--------- 1 file changed, 52 insertions(+), 48 deletions(-) diff --git a/kube/services/fenceshib/fenceshib-configmap.yaml b/kube/services/fenceshib/fenceshib-configmap.yaml index 2412518c0..b8e55243d 100644 --- a/kube/services/fenceshib/fenceshib-configmap.yaml +++ b/kube/services/fenceshib/fenceshib-configmap.yaml @@ -231,48 +231,48 @@ data: few exceptions for newer attributes where the name is the same for both versions. You will usually want to uncomment or map the names for both SAML versions as a unit. 
--> - + - + - + - + - + - + - + - + @@ -286,7 +286,7 @@ data: - + @@ -416,47 +416,51 @@ data: - MIIGeDCCBWCgAwIBAgITKwAE3xjJ0BmsXYl8hwAAAATfGDANBgkqhkiG9w0BAQsF - ADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZFgxESEhT - U0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIxMDMyMzEwMjMz - MloXDTIzMDMyMzEwMjMzMlowcDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAk1EMREw - DwYDVQQHEwhCZXRoZXNkYTEMMAoGA1UEChMDSEhTMQwwCgYDVQQLEwNOSUgxJTAj - BgNVBAMTHHdhbXNpZ25pbmdmZWRlcmF0aW9uLm5paC5nb3YwggEiMA0GCSqGSIb3 - DQEBAQUAA4IBDwAwggEKAoIBAQDrng8ItLe/PdN7+GT50g0xd4Kc5zVLk5JhHV/M - C0ICo3ulYpNnK8f0vGYvKXhG9B4gyYjjAVgY8dHL1Yi9Vw4OCMHiAhT80qidFhah - xdcz8EaKWueqlMV+SZ8/6luahSmYYjKHAxICMg253gHsG6A64pWBsf58fzOYeEV/ - HIItkthIJ7Rh71gXeZwmcir3fAve1sQXrgXsRb265yFQaxLrRI+QA7k+Tiemlt4+ - 7wBOXdROm0kxGJT6u6+IG8g2Qdbc1JWaAmwROGCByREQzfMNUVpXCXJHhKSrHype - z8Z0o4p2sLXyOysbBAmNoShMhvaaPlsrJt7PyDN5uj6KaXNNAgMBAAGjggMrMIID - JzAdBgNVHQ4EFgQUb/4wTaSXJ6P1tAmI8mWJhMv1VHowHwYDVR0jBBgwFoAUeWw4 - jBnSyRkHcaYQ+YnwrdCDBZMwggESBgNVHR8EggEJMIIBBTCCAQGggf6ggfuGgcFs - ZGFwOi8vL0NOPU5JSC1EUEtJLUNBLTFBLENOPU5JSERQS0lDQVNWQyxDTj1DRFAs - Q049UHVibGljJTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmln - dXJhdGlvbixEQz1ESEhTU0VDVVJJVFksREM9TE9DQUw/Y2VydGlmaWNhdGVSZXZv - Y2F0aW9uTGlzdD9iYXNlP29iamVjdENsYXNzPWNSTERpc3RyaWJ1dGlvblBvaW50 - hjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09WL0NlcnREYXRhL05JSC1EUEtJLUNB - LTFBLmNybDCCATkGCCsGAQUFBwEBBIIBKzCCAScwgbQGCCsGAQUFBzAChoGnbGRh - cDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1BSUEsQ049UHVibGljJTIwS2V5JTIw - U2VydmljZXMsQ049U2VydmljZXMsQ049Q29uZmlndXJhdGlvbixEQz1ESEhTU0VD - VVJJVFksREM9TE9DQUw/Y0FDZXJ0aWZpY2F0ZT9iYXNlP29iamVjdENsYXNzPWNl - cnRpZmljYXRpb25BdXRob3JpdHkwQQYIKwYBBQUHMAKGNWh0dHA6Ly9OSUhEUEtJ - Q1JMLk5JSC5HT1YvQ2VydERhdGEvTklILURQS0ktQ0EtMUEuY3J0MCsGCCsGAQUF - BzABhh9odHRwOi8vTklIRFBLSU9DU1AuTklILkdPVi9vY3NwMAsGA1UdDwQEAwIF - oDA9BgkrBgEEAYI3FQcEMDAuBiYrBgEEAYI3FQiHscIohpH8F4b5jwiG7rxzgbud - JR2F39lChY/gIQIBZQIBJDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEw - 
JwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDAjAKBggrBgEFBQcDATANBgkqhkiG - 9w0BAQsFAAOCAQEAkgyJY5Pdyz7hF83hu9BsijKHOdMWe8fDyN7GsDR1O0URBuJW - oK7FsemmITwMCiDhH+NDkrRWM27EQhuv4w4yIUIFVqPeJS+Ff3gKyqB/VNcrDbfc - 1RU7Q0qyxwpItm/cEUTTTnfNppf/O6wn/FUbpvPbHMNukqhjtbiYJrmKcO1U0lEu - i7FlnPW6rRmEbhp/bChVJMkxw8sBH4K3Vrx9c15nPuBgv4E1cFLe1rwrt3wEeRlU - OaWMTbLwYBaBo2BC3iDHzWioSl4OtzItEkT5XxNOhViuoty09Tu5zd7byqiV7To3 - YVc+Yi/VBubgB+osvPXPAv0AQCLo88dO7MBWQg== + MIIGrDCCBZSgAwIBAgITKwAL5UokKuFiZ7VPlQAAAAvlSjANBgkqhkiG9w0B + AQsFADBOMRUwEwYKCZImiZPyLGQBGRYFTE9DQUwxHDAaBgoJkiaJk/IsZAEZ + FgxESEhTU0VDVVJJVFkxFzAVBgNVBAMTDk5JSC1EUEtJLUNBLTFBMB4XDTIy + MTIwNjE2NTUzNloXDTI0MTIwNTE2NTUzNlowgaMxCzAJBgNVBAYTAlVTMREw + DwYDVQQIEwhNYXJ5bGFuZDERMA8GA1UEBxMIQmV0aGVzZGExDDAKBgNVBAoT + A05JSDEMMAoGA1UECxMDQ0lUMSUwIwYDVQQDExx3YW1zaWduaW5nZmVkZXJh + dGlvbi5uaWguZ292MSswKQYJKoZIhvcNAQkBFhxuaWhsb2dpbnN1cHBvcnRA + bWFpbC5uaWguZ292MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA + o3aHcoq0SAof+GXCl6aZOw9w8CrWTSxz3hxEvG2RaJ4Bm0+UQEcQHArCiQ+Y + Wjmx8eORRwOblQKmcozpQAOxNRu7fbJn8msdryKdju+nBJg/gn0Ygn44EJEq + pZmBn+FBRgH/lADRdpLM8uO654i1x5Pr8TQtNMevGNot8oiacOZkB1A5N6+l + 4guxToA2ZuNhHRhwrpd1wIyq6sgY3J8XpWlx54HjDc8bZvia0bEhJns/qZpM + mAh5wvIP1I2JngqJ55mpl/btbIXX+uTn3tIomWre3KKjDKh9ZjUQom8VqTzp + oGYHSjTExuopsHnnVpC1HTW0QJoxFa5yR1f2fiUTZwIDAQABo4IDKzCCAycw + HQYDVR0OBBYEFMqGnTB0W0rFy8tD2y6JnApAzRCyMB8GA1UdIwQYMBaAFHls + OIwZ0skZB3GmEPmJ8K3QgwWTMIIBEgYDVR0fBIIBCTCCAQUwggEBoIH+oIH7 + hoHBbGRhcDovLy9DTj1OSUgtRFBLSS1DQS0xQSxDTj1OSUhEUEtJQ0FTVkMs + Q049Q0RQLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz + LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2Nl + cnRpZmljYXRlUmV2b2NhdGlvbkxpc3Q/YmFzZT9vYmplY3RDbGFzcz1jUkxE + aXN0cmlidXRpb25Qb2ludIY1aHR0cDovL05JSERQS0lDUkwuTklILkdPVi9D + ZXJ0RGF0YS9OSUgtRFBLSS1DQS0xQS5jcmwwggE5BggrBgEFBQcBAQSCASsw + ggEnMIG0BggrBgEFBQcwAoaBp2xkYXA6Ly8vQ049TklILURQS0ktQ0EtMUEs + Q049QUlBLENOPVB1YmxpYyUyMEtleSUyMFNlcnZpY2VzLENOPVNlcnZpY2Vz + 
LENOPUNvbmZpZ3VyYXRpb24sREM9REhIU1NFQ1VSSVRZLERDPUxPQ0FMP2NB + Q2VydGlmaWNhdGU/YmFzZT9vYmplY3RDbGFzcz1jZXJ0aWZpY2F0aW9uQXV0 + aG9yaXR5MEEGCCsGAQUFBzAChjVodHRwOi8vTklIRFBLSUNSTC5OSUguR09W + L0NlcnREYXRhL05JSC1EUEtJLUNBLTFBLmNydDArBggrBgEFBQcwAYYfaHR0 + cDovL05JSERQS0lPQ1NQLk5JSC5HT1Yvb2NzcDALBgNVHQ8EBAMCBaAwPQYJ + KwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIh7HCKIaR/BeG+Y8Ihu68c4G7nSUd + gZOnCYKOiSECAWQCAUwwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMC + MCcGCSsGAQQBgjcVCgQaMBgwCgYIKwYBBQUHAwEwCgYIKwYBBQUHAwIwDQYJ + KoZIhvcNAQELBQADggEBAGxvrAxX3RUmFXeUa1UewCWfzWCnI3wTMKkqvmI2 + CySFEOniXNXC/hhu0i000QD9mS527u+lGqgN6eaUaEaSDXMszYR753whJ1Wf + xJ50zji2mvUWDyzdRbcvxbVfYe6h6+TzQl0gd8z1DjAxkUWydv9aAFYHNiIY + BbhPqvrlOT+oV8CYI8ghEg7qyxo1mso99aVGCbnBA+6IC+jt8lvwQYFISW8J + lxJbz5P9fyAbQFuMvcvSkx1WWCCK+d3WsLzU2JETjmYNoID5skFaIfrq+rV1 + nBqQfCSKApojRaUMwn83IRcosSu0Y3dhpmxz2oDkOURbwOkuPJRgYnZRLBDn + e50= - + urn:oasis:names:tc:SAML:2.0:nameid-format:persistent - + From 20621c92cb23580773a761b12123948224e23838 Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Tue, 5 Sep 2023 16:04:15 -0500 Subject: [PATCH 206/362] updating heal cedar filter for research programs (#2354) --- files/scripts/healdata/heal-cedar-data-ingest.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index c4d68199a..892734584 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -11,7 +11,8 @@ "study_metadata.data.data_type": "Data Type", "study_metadata.study_type.study_subject_type": "Subject Type", "study_metadata.human_subject_applicability.gender_applicability": "Gender", - "study_metadata.human_subject_applicability.age_applicability": "Age" + "study_metadata.human_subject_applicability.age_applicability": "Age", + "research_program": "Research Program" } # Defines how to handle special cases for values in filters From 
00ba748a51faf5ede21bd52108d57c997a366e8e Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 12 Sep 2023 10:40:58 -0500 Subject: [PATCH 207/362] feat: update cohort-middleware setup script to setup everything in Jenkins (#2361) --- gen3/bin/kube-setup-cohort-middleware.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh index 76096469c..477de064c 100644 --- a/gen3/bin/kube-setup-cohort-middleware.sh +++ b/gen3/bin/kube-setup-cohort-middleware.sh @@ -17,7 +17,7 @@ setup_secrets() { mkdir -p $(gen3_secrets_folder)/g3auto/cohort-middleware credsFile="$(gen3_secrets_folder)/g3auto/cohort-middleware/development.yaml" - if [[ (! -f "$credsFile") && -z "$JENKINS_HOME" ]]; then + if [[ (! -f "$credsFile") ]]; then DB_NAME=$(jq -r ".db_database" <<< "$dbcreds") export DB_NAME DB_USER=$(jq -r ".db_username" <<< "$dbcreds") From 0b59b0df0a12a876a96747d171f606e32232b60f Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Thu, 14 Sep 2023 09:59:25 -0500 Subject: [PATCH 208/362] Update sslpolicy to tls 1.3 in ingress/alb (#2312) * Update sslpolicy to tls 1.3 in ingress/alb * Update ingress.yaml changed to higher tls 1.2 vs 1.3 --------- Co-authored-by: cmlsn <100160785+cmlsn@users.noreply.github.com> --- kube/services/ingress/ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml index 3ceacf608..65916679a 100644 --- a/kube/services/ingress/ingress.yaml +++ b/kube/services/ingress/ingress.yaml @@ -11,7 +11,7 @@ metadata: alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600 alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' - alb.ingress.kubernetes.io/ssl-policy: "ELBSecurityPolicy-TLS-1-2-2017-01" + alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-2021-06 spec: ingressClassName: alb rules: From fa75a4a9d97df3ee22bfb4e80dd108b28c06ebb5 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 14 Sep 2023 15:17:54 -0400 Subject: [PATCH 209/362] Update configmap.yaml (#2362) --- kube/services/argo-events/workflows/configmap.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index 6b21221b5..c707ba002 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -31,7 +31,7 @@ data: role: $WORKFLOW_NAME limits: resources: - cpu: 1000 + cpu: 2000 providerRef: name: workflow-$WORKFLOW_NAME # Allow pods to be rearranged From ae009d2b057e15d8c0bb4579315b003d01796e87 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Wed, 20 Sep 2023 11:11:43 -0500 Subject: [PATCH 210/362] Move DD to govcloud (#2341) * Move datadog to govcloud --- .secrets.baseline | 98 ++++++++++++++++++++++++++++--- kube/services/datadog/values.yaml | 26 +++++--- 2 files changed, 109 insertions(+), 15 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 527ffcc30..16475e0b2 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-08-10T17:51:06Z", + "generated_at": "2023-09-18T18:49:22Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -77,6 +77,7 @@ "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 122, "type": "Secret Keyword" @@ -85,14 +86,16 @@ "Docker/jenkins/Jenkins-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, - "line_number": 137, + "line_number": 143, "type": "Secret Keyword" } ], "Docker/jenkins/Jenkins/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 107, "type": "Secret Keyword" @@ -101,6 +104,7 @@ "Docker/jenkins/Jenkins2/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 108, "type": "Secret Keyword" @@ -294,14 +298,14 @@ ], "files/dashboard/usage-reports/package-lock.json": [ { - "hashed_secret": "65ecd0650541b6caecdb6986f1871c2e6a95bdfe", + "hashed_secret": "e095101882f706c4de95e0f75c5bcb9666e3f448", "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { - "hashed_secret": "e35a49e53bb97044b35cc0e4d963b4ac49e9ac7e", + "hashed_secret": "5422e4f96964d5739998b25ac214520c1b113e5b", "is_secret": false, "is_verified": false, 
"line_number": 15, @@ -342,6 +346,15 @@ "type": "Secret Keyword" } ], + "gen3/bin/kube-setup-aurora-monitoring.sh": [ + { + "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, + "is_verified": false, + "line_number": 59, + "type": "Secret Keyword" + } + ], "gen3/bin/kube-setup-certs.sh": [ { "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", @@ -392,6 +405,15 @@ "type": "Secret Keyword" } ], + "gen3/bin/kube-setup-dicom.sh": [ + { + "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, + "is_verified": false, + "line_number": 78, + "type": "Secret Keyword" + } + ], "gen3/bin/kube-setup-jenkins.sh": [ { "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", @@ -731,9 +753,32 @@ "type": "Secret Keyword" } ], + "kube/services/argocd/values.yaml": [ + { + "hashed_secret": "27c6929aef41ae2bcadac15ca6abcaff72cda9cd", + "is_secret": false, + "is_verified": false, + "line_number": 360, + "type": "Private Key" + }, + { + "hashed_secret": "edbd5e119f94badb9f99a67ac6ff4c7a5204ad61", + "is_secret": false, + "is_verified": false, + "line_number": 379, + "type": "Secret Keyword" + }, + { + "hashed_secret": "91dfd9ddb4198affc5c194cd8ce6d338fde470e2", + "is_secret": false, + "is_verified": false, + "line_number": 412, + "type": "Secret Keyword" + } + ], "kube/services/datadog/values.yaml": [ { - "hashed_secret": "52330dffa4d0795b4199a66428e54eca228e1661", + "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", "is_secret": false, "is_verified": false, "line_number": 23, @@ -1191,14 +1236,14 @@ "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", "is_secret": false, "is_verified": false, - "line_number": 166, + "line_number": 165, "type": "Secret Keyword" }, { - "hashed_secret": "244f421f896bdcdd2784dccf4eaf7c8dfd5189b5", + "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", "is_secret": false, "is_verified": false, - "line_number": 266, + "line_number": 265, "type": "Secret Keyword" 
} ], @@ -1242,42 +1287,49 @@ "package-lock.json": [ { "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", + "is_secret": false, "is_verified": false, "line_number": 1481, "type": "Base64 High Entropy String" }, { "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", + "is_secret": false, "is_verified": false, "line_number": 1489, "type": "Base64 High Entropy String" }, { "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", + "is_secret": false, "is_verified": false, "line_number": 1521, "type": "Base64 High Entropy String" }, { "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", + "is_secret": false, "is_verified": false, "line_number": 1547, "type": "Base64 High Entropy String" }, { "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", + "is_secret": false, "is_verified": false, "line_number": 1611, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", + "is_secret": false, "is_verified": false, "line_number": 1640, "type": "Base64 High Entropy String" }, { "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", + "is_secret": false, "is_verified": false, "line_number": 1648, "type": "Base64 High Entropy String" @@ -1291,48 +1343,56 @@ }, { "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", + "is_secret": false, "is_verified": false, "line_number": 1683, "type": "Base64 High Entropy String" }, { "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", + "is_secret": false, "is_verified": false, "line_number": 1733, "type": "Base64 High Entropy String" }, { "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", + "is_secret": false, "is_verified": false, "line_number": 1742, "type": "Base64 High Entropy String" }, { "hashed_secret": "a7af5768a6d936e36f28e1030d7f894d7aaf555e", + "is_secret": false, "is_verified": false, "line_number": 1755, "type": "Base64 High Entropy String" }, { "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", + 
"is_secret": false, "is_verified": false, "line_number": 1769, "type": "Base64 High Entropy String" }, { "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", + "is_secret": false, "is_verified": false, "line_number": 1855, "type": "Base64 High Entropy String" }, { "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", + "is_secret": false, "is_verified": false, "line_number": 1889, "type": "Base64 High Entropy String" }, { "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", + "is_secret": false, "is_verified": false, "line_number": 1894, "type": "Base64 High Entropy String" @@ -1346,42 +1406,49 @@ }, { "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", + "is_secret": false, "is_verified": false, "line_number": 1950, "type": "Base64 High Entropy String" }, { "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", + "is_secret": false, "is_verified": false, "line_number": 1983, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", + "is_secret": false, "is_verified": false, "line_number": 2004, "type": "Base64 High Entropy String" }, { "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", + "is_secret": false, "is_verified": false, "line_number": 2013, "type": "Base64 High Entropy String" }, { "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", + "is_secret": false, "is_verified": false, "line_number": 2046, "type": "Base64 High Entropy String" }, { "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", + "is_secret": false, "is_verified": false, "line_number": 2078, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", + "is_secret": false, "is_verified": false, "line_number": 2083, "type": "Base64 High Entropy String" @@ -1395,90 +1462,105 @@ }, { "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", + "is_secret": false, "is_verified": false, "line_number": 2138, "type": "Base64 High Entropy String" }, 
{ "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", + "is_secret": false, "is_verified": false, "line_number": 2143, "type": "Base64 High Entropy String" }, { "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", + "is_secret": false, "is_verified": false, "line_number": 2150, "type": "Base64 High Entropy String" }, { "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", + "is_secret": false, "is_verified": false, "line_number": 2188, "type": "Base64 High Entropy String" }, { "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", + "is_secret": false, "is_verified": false, "line_number": 2291, "type": "Base64 High Entropy String" }, { "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", + "is_secret": false, "is_verified": false, "line_number": 2303, "type": "Base64 High Entropy String" }, { "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", + "is_secret": false, "is_verified": false, "line_number": 2308, "type": "Base64 High Entropy String" }, { "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", + "is_secret": false, "is_verified": false, "line_number": 2317, "type": "Base64 High Entropy String" }, { "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", + "is_secret": false, "is_verified": false, "line_number": 2327, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", + "is_secret": false, "is_verified": false, "line_number": 2332, "type": "Base64 High Entropy String" }, { "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", + "is_secret": false, "is_verified": false, "line_number": 2340, "type": "Base64 High Entropy String" }, { "hashed_secret": "eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", + "is_secret": false, "is_verified": false, "line_number": 2349, "type": "Base64 High Entropy String" }, { "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", + "is_secret": false, "is_verified": false, "line_number": 2357, "type": "Base64 High 
Entropy String" }, { "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", + "is_secret": false, "is_verified": false, "line_number": 2369, "type": "Base64 High Entropy String" }, { "hashed_secret": "3f0c251b9c2c21454445a98fde6915ceacde2136", + "is_secret": false, "is_verified": false, "line_number": 2387, "type": "Base64 High Entropy String" diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index c613bd079..c7a56035c 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -20,7 +20,18 @@ datadog: # datadog.apiKeyExistingSecret -- Use existing Secret which stores API key instead of creating a new one. The value should be set with the `api-key` key inside the secret. ## If set, this parameter takes precedence over "apiKey". - apiKeyExistingSecret: "datadog-agent" + apiKeyExistingSecret: "ddgov-apikey" + + # datadog.site -- The site of the Datadog intake to send Agent data to. + # (documentation: https://docs.datadoghq.com/getting_started/site/) + + ## Set to 'datadoghq.com' to send data to the US1 site (default). + ## Set to 'datadoghq.eu' to send data to the EU site. + ## Set to 'us3.datadoghq.com' to send data to the US3 site. + ## Set to 'us5.datadoghq.com' to send data to the US5 site. + ## Set to 'ddog-gov.com' to send data to the US1-FED site. + ## Set to 'ap1.datadoghq.com' to send data to the AP1 site. 
+ site: ddog-gov.com # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment ## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics @@ -59,11 +70,13 @@ datadog: apm: # datadog.apm.socketEnabled -- Enable APM over Socket (Unix Socket or windows named pipe) ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ - socketEnabled: true + socketEnabled: false # datadog.apm.portEnabled -- Enable APM over TCP communication (port 8126 by default) ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/ - portEnabled: true + portEnabled: false + + enabled: false # datadog.apm.port -- Override the trace Agent port ## Note: Make sure your client is sending to the same UDP port. @@ -80,15 +93,15 @@ datadog: # datadog.processAgent.processCollection -- Set this to true to enable process collection in process monitoring agent ## Requires processAgent.enabled to be set to true to have any effect - processCollection: true + processCollection: false # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes ## Requires processAgent.enabled and processAgent.processCollection to be set to true to have any effect ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing - stripProcessArguments: true + stripProcessArguments: false # datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations - processDiscovery: true + processDiscovery: false ## Enable systemProbe agent and provide custom configs systemProbe: @@ -327,4 +340,3 @@ agents: # agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true serviceAccountAnnotations: {} - From 6ce613ca97137774ce5de2430c22516c3158b098 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Fri, 22 Sep 2023 14:35:22 -0400 Subject: [PATCH 
211/362] Feat/argo events for workflows error checking (#2366) * Updated instance types to latest/greatest and removed consolidation * Updated instance types to latest/greatest and removed consolidation * Added error checking for the create and delete jobs for workflows * Reverting configmap back to old settings, as we haven't finished testing them --------- Co-authored-by: Edward Malinowski --- .../services/argo-events/workflows/sensor-completed.yaml | 9 +++++++-- kube/services/argo-events/workflows/sensor-created.yaml | 8 +++++++- kube/services/argo-events/workflows/sensor-deleted.yaml | 9 +++++++-- 3 files changed, 21 insertions(+), 5 deletions(-) diff --git a/kube/services/argo-events/workflows/sensor-completed.yaml b/kube/services/argo-events/workflows/sensor-completed.yaml index 5e4e5ae35..293c0e119 100644 --- a/kube/services/argo-events/workflows/sensor-completed.yaml +++ b/kube/services/argo-events/workflows/sensor-completed.yaml @@ -51,8 +51,13 @@ spec: args: - "-c" - | - kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME - kubectl delete provisioners workflow-$WORKFLOW_NAME + if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + fi + + if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete provisioners workflow-$WORKFLOW_NAME + fi env: - name: WORKFLOW_NAME value: "" diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index fa99f66c7..7b1b9d62f 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -59,7 +59,13 @@ spec: args: - "-c" - | - for file in /home/manifests/*.yaml; do envsubst < $file | kubectl apply -f -; done + if ! kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + envsubst < /home/manifests/nodetemplate.yaml | kubectl apply -f - + fi + + if ! 
kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + envsubst < /home/manifests/provisioner.yaml | kubectl apply -f - + fi env: - name: WORKFLOW_NAME value: "" diff --git a/kube/services/argo-events/workflows/sensor-deleted.yaml b/kube/services/argo-events/workflows/sensor-deleted.yaml index cad6a7a70..c235a820a 100644 --- a/kube/services/argo-events/workflows/sensor-deleted.yaml +++ b/kube/services/argo-events/workflows/sensor-deleted.yaml @@ -47,8 +47,13 @@ spec: args: - "-c" - | - kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME - kubectl delete provisioners workflow-$WORKFLOW_NAME + if kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete awsnodetemplate workflow-$WORKFLOW_NAME + fi + + if kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then + kubectl delete provisioners workflow-$WORKFLOW_NAME + fi env: - name: WORKFLOW_NAME value: "" From 310bd14e86ec12e7ba75677ca5aa6a559aabe2ff Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Mon, 25 Sep 2023 08:15:05 -0700 Subject: [PATCH 212/362] (HP-1169): get study info tags from search filters (#2343) * (HP-1165): get study info tags from search filters * (HP-1169): retain existing 'Data Repository' tags --- files/scripts/healdata/heal-cedar-data-ingest.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 892734584..2f8273851 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -76,6 +76,19 @@ def update_filter_metadata(metadata_to_update): filter_metadata.append({"key": filter_field_key, "value": filter_field_value}) filter_metadata = pydash.uniq(filter_metadata) metadata_to_update["advSearchFilters"] = filter_metadata + # Retain these from existing tags + save_tags = ["Data Repository"] + tags = [ + tag + 
for tag in metadata_to_update["tags"] + if tag["category"] in save_tags + ] + # Add any new tags from advSearchFilters + for f in metadata_to_update["advSearchFilters"]: + tag = {"name": f["value"], "category": f["key"]} + if tag not in tags: + tags.append(tag) + metadata_to_update["tags"] = tags return metadata_to_update parser = argparse.ArgumentParser() From ebee0a32d9c3e9510de76ba2204deccae0b36afb Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 27 Sep 2023 14:59:46 -0500 Subject: [PATCH 213/362] fix(fluentd): Fixed fluentd logging to work with new cri parser format (#2367) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-fluentd.sh | 8 +- kube/services/fluentd/gen3-1.15.3.conf | 231 +++++++++++++++++++++++++ 2 files changed, 235 insertions(+), 4 deletions(-) create mode 100644 kube/services/fluentd/gen3-1.15.3.conf diff --git a/gen3/bin/kube-setup-fluentd.sh b/gen3/bin/kube-setup-fluentd.sh index 28a7011a8..02214be9e 100644 --- a/gen3/bin/kube-setup-fluentd.sh +++ b/gen3/bin/kube-setup-fluentd.sh @@ -25,11 +25,11 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then export KUBECTL_NAMESPACE=logging # lets check the the version of fluentd, and use the right configuration - # as of 2020-05-06 the latest version is v1.10.2 - if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ]; + # if we are using newer versions of fluentd, assume we are using containerd which needs the newer config + if [ ${fluentdVersion} == "v1.15.3-debian-cloudwatch-1.0" ]; then fluentdConfigmap="${XDG_RUNTIME_DIR}/gen3.conf" - cat ${GEN3_HOME}/kube/services/fluentd/gen3-1.10.2.conf | tee ${fluentdConfigmap} > /dev/null + cat ${GEN3_HOME}/kube/services/fluentd/gen3-1.15.3.conf | tee ${fluentdConfigmap} > /dev/null gen3 update_config fluentd-gen3 "${fluentdConfigmap}" rm ${fluentdConfigmap} else @@ -54,7 +54,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then fi # We need this serviceaccount to be in the default namespace for 
the job and cronjob to properly work g3kubectl apply -f "${GEN3_HOME}/kube/services/fluentd/fluent-jobs-serviceaccount.yaml" -n default - if [ ${fluentdVersion} == "v1.10.2-debian-cloudwatch-1.0" ]; + if [ ${fluentdVersion} == "v1.15.3-debian-cloudwatch-1.0" ]; then ( unset KUBECTL_NAMESPACE diff --git a/kube/services/fluentd/gen3-1.15.3.conf b/kube/services/fluentd/gen3-1.15.3.conf new file mode 100644 index 000000000..d9b6bed5d --- /dev/null +++ b/kube/services/fluentd/gen3-1.15.3.conf @@ -0,0 +1,231 @@ +# +# Gen3 customization of fluent config. +# - tries to extract structure from gen3 service logs +# - includes the default conf at the bottom - just adds prefix rules +# +# Deploy by: +# - mount this file into the container at /fluentd/etc/gen3.conf +# - set environment variable FLUENTD_CONF=gen3.conf +# +# https://www.fluentd.org/guides/recipes/docker-logging +# https://docs.fluentd.org/v0.12/articles/config-file#introduction:-the-life-of-a-fluentd-event +# https://docs.fluentd.org/v1.0/articles/out_rewrite_tag_filter + + + + + + @type tail + @id in_tail_container_logs + path /var/log/containers/*.log + pos_file /var/log/fluentd-containers.log.pos + tag "#{ENV['FLUENT_CONTAINER_TAIL_TAG'] || 'kubernetes.*'}" + exclude_path "#{ENV['FLUENT_CONTAINER_TAIL_EXCLUDE_PATH'] || use_default}" + read_from_head true + + @type "#{ENV['FLUENT_CONTAINER_TAIL_PARSER_TYPE'] || 'json'}" + time_format %Y-%m-%dT%H:%M:%S.%NZ + + + + + @type tail + path /var/log/messages + pos_file /var/log/host-messages.log.pos + + @type syslog + + tag host.messages + + + + + @type tail + path /var/log/secure + pos_file /var/log/host-secure.log.pos + + @type syslog + + tag host.secure + + + + @type tail + @id in_tail_docker + path /var/log/docker.log + pos_file /var/log/fluentd-docker.log.pos + tag docker + + @type regexp + expression /^time="(? 
+ + + + + @type tail + @id in_tail_kubelet + multiline_flush_interval 5s + path /var/log/kubelet.log + pos_file /var/log/fluentd-kubelet.log.pos + tag kubelet + + @type kubernetes + + + + + + + @type kubernetes_metadata + @id filter_kube_metadata + kubernetes_url "#{ENV['FLUENT_FILTER_KUBERNETES_URL'] || 'https://' + ENV.fetch('KUBERNETES_SERVICE_HOST') + ':' + ENV.fetch('KUBERNETES_SERVICE_PORT') + '/api'}" + verify_ssl "#{ENV['KUBERNETES_VERIFY_SSL'] || true}" + ca_file "#{ENV['KUBERNETES_CA_FILE']}" + skip_labels "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_LABELS'] || 'false'}" + skip_container_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_CONTAINER_METADATA'] || 'false'}" + skip_master_url "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_MASTER_URL'] || 'false'}" + skip_namespace_metadata "#{ENV['FLUENT_KUBERNETES_METADATA_SKIP_NAMESPACE_METADATA'] || 'false'}" + + + + @type null + + + + @type null + + + + @type rewrite_tag_filter + + key $._HOSTNAME + pattern ^(.+)$ + tag $1.docker + + + + + @type rewrite_tag_filter + + key $._HOSTNAME + pattern ^(.+)$ + tag $1.kubelet + + + + + @type rewrite_tag_filter + + key $.host + pattern ^(.+)$ + tag $1.messages + + + + + @type rewrite_tag_filter + + key $.host + pattern ^(.+)$ + tag $1.secure + + + + + @type rewrite_tag_filter + + # json structured log - consider adoption a standard json schema: + # https://github.com/timberio/log-event-json-schema + key message + pattern /^\{\s*"gen3log":/ + tag kubernetes.gen3.json.${tag} + + + # combined log format - default Apache and nginx structure + # https://httpd.apache.org/docs/1.3/logs.html#combined + key message + pattern /^(((\d+\.\d+\.\d+\.\d+)|-)\s+){2}\S+\s+\[\d\d?\// + tag kubernetes.gen3.combined.${tag} + + + # unstructured log line + key message + pattern /\S/ + tag kubernetes.gen3.raw.${tag} + + + + + + @type record_transformer + + log_type json + # This one doesn't work for whatever reason, if you do ${record["kubernetes"]} the whole blob would be added, but can't access 
subobjects + #container_name ${record["kubernetes"]["container_name"]} + + + + + @type record_transformer + + log_type combined + + + + + @type record_transformer + + log_type raw + + + + + @type rewrite_tag_filter + + key $.kubernetes.pod_name + pattern ^(.+)$ + tag "#{Time.now.strftime('%Y-%m-%d')}.$1" + +# +# key $.kubernetes +# pattern ^(.+)$ +# tag $1.container_name +# + + +# +# @type rewrite_tag_filter +# +# key $.kubernetes.container_name +# pattern ^(.+)$ + #tag $1.${tag} +# tag ${tag}.$1 +# +# + +# TODO: +# * python stack traces: "Traceback (most recent call last):"" +# https://docs.fluentd.org/v0.12/articles/parser_multiline#formatn +# +# Idea: add `visitor` cookie to revproxy ... + + + + @type cloudwatch_logs + @id out_cloudwatch_logs + log_group_name "#{ENV['LOG_GROUP_NAME']}" + auto_create_stream true + use_tag_as_stream true + retention_in_days "#{ENV['RETENTION_IN_DAYS'] || 'nil'}" + json_handler yajl # To avoid UndefinedConversionError + log_rejected_request "#{ENV['LOG_REJECTED_REQUEST']}" # Log rejected request for missing parts + + + +#@include fluent.conf +#@include conf.d/*.conf From 14a83e6517a2d0f9dbfbd7eb27fc9a519a7422c1 Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Fri, 29 Sep 2023 10:11:38 -0500 Subject: [PATCH 214/362] update squid whitelist for chordshealth (#2368) --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 15d6037ba..44f468097 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -21,6 +21,7 @@ .centos.org .ceph.com .chef.io +.chordshealth.org .clamav.net .cloud.google.com .cloudfront.net From 31ad772dbfebe9e11d380203ba51f119636c28a4 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 2 Oct 2023 14:24:25 -0500 Subject: [PATCH 215/362] Added node emptiness timer (#2372) Co-authored-by: Edward Malinowski --- 
kube/services/argo-events/workflows/configmap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index c707ba002..62fcbbfb3 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -39,6 +39,7 @@ data: enabled: true # Kill nodes after 30 days to ensure they stay up to date ttlSecondsUntilExpired: 2592000 + ttlSecondsAfterEmpty: 10 nodetemplate.yaml: | apiVersion: karpenter.k8s.aws/v1alpha1 From d842370f83b1300a7b57db2a79f1ce6350d41f04 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 3 Oct 2023 09:00:33 -0500 Subject: [PATCH 216/362] PXP-10931 Hatchery Nextflow (#2369) --- gen3/bin/kube-setup-hatchery.sh | 59 ++++++++++++++++++++++++++++++--- 1 file changed, 55 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index 07172aa1e..691fb354a 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -42,7 +42,7 @@ policy=$( cat < Date: Wed, 4 Oct 2023 14:44:16 -0500 Subject: [PATCH 217/362] Updated instance types to latest/greatest and removed consolidation (#2364) * Updated instance types to latest/greatest and removed consolidation * Updated instance types to latest/greatest and removed consolidation --------- Co-authored-by: Edward Malinowski --- kube/services/argo-events/workflows/configmap.yaml | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index 62fcbbfb3..50f04583f 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -18,11 +18,12 @@ data: operator: In values: - amd64 - - key: karpenter.k8s.aws/instance-category + - key: 
karpenter.k8s.aws/instance-family operator: In values: - - c - - t + - c6i + - c7i + - m7i taints: - key: role value: $WORKFLOW_NAME @@ -34,9 +35,6 @@ data: cpu: 2000 providerRef: name: workflow-$WORKFLOW_NAME - # Allow pods to be rearranged - consolidation: - enabled: true # Kill nodes after 30 days to ensure they stay up to date ttlSecondsUntilExpired: 2592000 ttlSecondsAfterEmpty: 10 @@ -98,7 +96,7 @@ data: blockDeviceMappings: - deviceName: /dev/xvda ebs: - volumeSize: 50Gi + volumeSize: 100Gi volumeType: gp2 encrypted: true deleteOnTermination: true From 220788e9a313c1800a2f624426e54a1232d89ff7 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 5 Oct 2023 12:18:50 -0400 Subject: [PATCH 218/362] Tagging nodes by their purpose, so if we want to monitor them, we can filter nicely (#2374) --- kube/services/argo-events/workflows/configmap.yaml | 1 + kube/services/karpenter/nodeTemplateDefault.yaml | 1 + kube/services/karpenter/nodeTemplateGPU.yaml | 1 + kube/services/karpenter/nodeTemplateJupyter.yaml | 1 + kube/services/karpenter/nodeTemplateWorkflow.yaml | 1 + 5 files changed, 5 insertions(+) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index 50f04583f..eb5f1b04f 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -56,6 +56,7 @@ data: workflowname: $WORKFLOW_NAME gen3username: $GEN3_USERNAME gen3service: argo-workflows + purpose: workflow metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index 20198944f..a3dbf6480 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -11,6 +11,7 @@ spec: karpenter.sh/discovery: VPC_NAME Environment: VPC_NAME Name: eks-VPC_NAME-karpenter + purpose: 
default metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml index a6ca7bbc8..5270b697f 100644 --- a/kube/services/karpenter/nodeTemplateGPU.yaml +++ b/kube/services/karpenter/nodeTemplateGPU.yaml @@ -11,6 +11,7 @@ spec: Environment: VPC_NAME Name: eks-VPC_NAME-gpu-karpenter karpenter.sh/discovery: VPC_NAME + purpose: gpu metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml index ad72d3dd6..74f24926a 100644 --- a/kube/services/karpenter/nodeTemplateJupyter.yaml +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -11,6 +11,7 @@ spec: Environment: VPC_NAME Name: eks-VPC_NAME-jupyter-karpenter karpenter.sh/discovery: VPC_NAME + purpose: jupyter metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml index 565d06f7c..ec2b81a60 100644 --- a/kube/services/karpenter/nodeTemplateWorkflow.yaml +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -11,6 +11,7 @@ spec: Environment: VPC_NAME Name: eks-VPC_NAME-workflow-karpenter karpenter.sh/discovery: VPC_NAME + purpose: workflow metadataOptions: httpEndpoint: enabled httpProtocolIPv6: disabled From a10e48230876715011026f8a00c4914ec3dbbe69 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Tue, 10 Oct 2023 17:16:34 -0400 Subject: [PATCH 219/362] Feat/node monitor (#2379) * AWS tags are nice, but we need k8s labels * This should get the node-monitor cronjob set up * Need to actually do these right * Getting config via env vars and configmaps now * That was dumb anyway * Switching to awshelper * Putting a sleep so I can check these * I don't think the SA was the problem, but I've got nothing better * Removing the 
sleep. IDK what's happening * Adding a label to let us do stuff within the bounds of netpolicy * We'll also log if we find a node running too long * Added a sync policy * There was some extra stuff we don't want * Fixing the branch that we point to --- kube/services/node-monitor/application.yaml | 22 ++++++++ kube/services/node-monitor/auth.yaml | 18 +++++++ kube/services/node-monitor/cronjob.yaml | 58 +++++++++++++++++++++ 3 files changed, 98 insertions(+) create mode 100644 kube/services/node-monitor/application.yaml create mode 100644 kube/services/node-monitor/auth.yaml create mode 100644 kube/services/node-monitor/cronjob.yaml diff --git a/kube/services/node-monitor/application.yaml b/kube/services/node-monitor/application.yaml new file mode 100644 index 000000000..df41c34b9 --- /dev/null +++ b/kube/services/node-monitor/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: node-monitor-application + namespace: argocd +spec: + destination: + namespace: default + server: https://kubernetes.default.svc + project: default + source: + repoURL: https://github.com/uc-cdis/cloud-automation.git + targetRevision: master + path: kube/services/node-monitor + directory: + exclude: "application.yaml" + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/kube/services/node-monitor/auth.yaml b/kube/services/node-monitor/auth.yaml new file mode 100644 index 000000000..72560cddc --- /dev/null +++ b/kube/services/node-monitor/auth.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: node-monitor + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: node-monitor-binding +subjects: + - kind: ServiceAccount + name: node-monitor + namespace: default +roleRef: + kind: ClusterRole + name: system:node + apiGroup: rbac.authorization.k8s.io diff --git a/kube/services/node-monitor/cronjob.yaml 
b/kube/services/node-monitor/cronjob.yaml new file mode 100644 index 000000000..e53046280 --- /dev/null +++ b/kube/services/node-monitor/cronjob.yaml @@ -0,0 +1,58 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: node-monitor-cron + namespace: default +spec: + schedule: "*/5 * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: node-monitor + containers: + - name: kubectl + image: quay.io/cdis/awshelper + env: + # This is the label we want to monitor, probably will never need to change + - name: NODE_LABEL + value: purpose=workflow + # This is 3 * 3600, or 3 hours + - name: THRESHOLD_TIME + value: "10800" + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + # Get all nodes with specific label and check their age + kubectl get nodes -l "$NODE_LABEL" -o json | jq -c '.items[] | {name: .metadata.name, creationTimestamp: .metadata.creationTimestamp}' | while read node_info; do + NODE_NAME=$(echo $node_info | jq -r '.name') + CREATION_TIMESTAMP=$(echo $node_info | jq -r '.creationTimestamp') + + # Convert creation timestamp to Unix Epoch time + CREATION_EPOCH=$(date -d "$CREATION_TIMESTAMP" +%s) + + # Get current Unix Epoch time + CURRENT_EPOCH=$(date +%s) + + # Calculate node age in seconds + NODE_AGE=$(($CURRENT_EPOCH - $CREATION_EPOCH)) + + # Check if node age is greater than threshold + if [ "$NODE_AGE" -gt "$THRESHOLD_TIME" ]; then + echo "Node $NODE_NAME has been around too long, sending an alert" + # Send alert to Slack + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODE_NAME}\` is older than 3 hours!\"}" $SLACK_WEBHOOK_URL + fi + done + restartPolicy: OnFailure From 9d7e5d43b7401fb7ead2445e3272234d02accba4 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Wed, 11 Oct 2023 12:50:35 -0500 Subject: [PATCH 
220/362] fix: skip study if it is neither registered nor unregistered (#2381) --- files/scripts/healdata/heal-cedar-data-ingest.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 2f8273851..e6634a70a 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -169,6 +169,9 @@ def update_filter_metadata(metadata_to_update): print("Metadata is already registered. Updating MDS record") elif mds_res["_guid_type"] == "unregistered_discovery_metadata": print("Metadata has not been registered. Registering it in MDS record") + else: + print(f"This metadata data record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped") + continue if "clinicaltrials_gov" in cedar_record: mds_clinical_trials = cedar_record["clinicaltrials_gov"] From cac39461be73852965836bd84a19e6ec37ea5cb9 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 12 Oct 2023 08:58:55 -0700 Subject: [PATCH 221/362] Skip setup_secrets in CI runs (#2382) --- gen3/bin/kube-setup-cohort-middleware.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/gen3/bin/kube-setup-cohort-middleware.sh b/gen3/bin/kube-setup-cohort-middleware.sh index 477de064c..a6a024578 100644 --- a/gen3/bin/kube-setup-cohort-middleware.sh +++ b/gen3/bin/kube-setup-cohort-middleware.sh @@ -7,6 +7,10 @@ gen3_load "gen3/lib/kube-setup-init" setup_secrets() { gen3_log_info "Deploying secrets for cohort-middleware" # subshell + if [[ -n "$JENKINS_HOME" ]]; then + gen3_log_err "skipping secrets setup in non-adminvm environment" + return 0 + fi ( if ! 
dbcreds="$(gen3 db creds ohdsi)"; then From 140e79feb7b62cb4299e3c96f2e0b002b0e11cd5 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:00:24 -0600 Subject: [PATCH 222/362] adding logic to exit a gitops sync if the new json contains invalid json (#2377) --- gen3/bin/gitops.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh index 48ba6512c..fda6d4ffa 100644 --- a/gen3/bin/gitops.sh +++ b/gen3/bin/gitops.sh @@ -291,9 +291,15 @@ gen3_gitops_sync() { if g3kubectl get configmap manifest-versions; then oldJson=$(g3kubectl get configmap manifest-versions -o=json | jq ".data") fi - newJson=$(g3k_config_lookup ".versions") echo "old JSON is: $oldJson" - echo "new JSON is: $newJson" + newJson=$(g3k_config_lookup ".versions") + # Make sure the script exits if newJSON contains invalid JSON + if [ $? -ne 0 ]; then + echo "Error: g3k_config_lookup command failed- invalid JSON" + exit 1 + else + echo "new JSON is: $newJson" + fi if [[ -z $newJson ]]; then echo "Manifest does not have versions section. Unable to get new versions, skipping version update." elif [[ -z $oldJson ]]; then From 265d53c94a218189910d36fac015c148a96a2702 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 17 Oct 2023 13:31:01 -0600 Subject: [PATCH 223/362] Gen3 Command to Create a New ES7 Cluster with the Same ES6 Cluster Settings (#2389) * created a gen3 command to create a new elasticsearch cluster based on the existing cluster. * updating script to check if new cluster exists before triggering the create command. Also, increased the sleep time to allow the cluster to come up. 
--- gen3/bin/create-es7-cluster.sh | 63 ++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) create mode 100644 gen3/bin/create-es7-cluster.sh diff --git a/gen3/bin/create-es7-cluster.sh b/gen3/bin/create-es7-cluster.sh new file mode 100644 index 000000000..d18c4203f --- /dev/null +++ b/gen3/bin/create-es7-cluster.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +# Save the new and old cluster names to vars +environment=`gen3 api environment` +existing_cluster_name="$environment-gen3-metadata" +new_cluster_name="$environment-gen3-metadata-2" + +# Gather existing cluster information +cluster_info=$(aws es describe-elasticsearch-domain --domain-name "$existing_cluster_name") + +# Extract relevant information from the existing cluster +instance_type=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceType'` +instance_count=`echo "$cluster_info" | jq -r '.DomainStatus.ElasticsearchClusterConfig.InstanceCount'` +volume_type=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeType'` +volume_size=`echo "$cluster_info" | jq -r '.DomainStatus.EBSOptions.VolumeSize'` +vpc_name=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.VPCId'` +subnet_ids=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SubnetIds[]'` +security_groups=`echo "$cluster_info" | jq -r '.DomainStatus.VPCOptions.SecurityGroupIds[]'` +access_policies=`echo "$cluster_info" | jq -r '.DomainStatus.AccessPolicies'` +kms_key_id=`echo "$cluster_info" | jq -r '.DomainStatus.EncryptionAtRestOptions.KmsKeyId'` + +# Check if the new Elasticsearch cluster name already exists +new_cluster=`aws es describe-elasticsearch-domain --domain-name "$new_cluster_name"` + +if [ -n "$new_cluster" ]; then + echo "Cluster $new_cluster_name already exists" +else + echo "Cluster does not exist- creating..." 
+ # Create the new Elasticsearch cluster + aws es create-elasticsearch-domain \ + --domain-name "$new_cluster_name" \ + --elasticsearch-version "7.10" \ + --elasticsearch-cluster-config \ + "InstanceType=$instance_type,InstanceCount=$instance_count" \ + --ebs-options \ + "EBSEnabled=true,VolumeType=$volume_type,VolumeSize=$volume_size" \ + --vpc-options "SubnetIds=${subnet_ids[*]},SecurityGroupIds=${security_groups[*]}" \ + --access-policies "$access_policies" \ + --encryption-at-rest-options "Enabled=true,KmsKeyId=$kms_key_id"\ + > /dev/null 2>&1 + + # Wait for the new cluster to be available + sleep_duration=60 + max_retries=10 + retry_count=0 + + while [ $retry_count -lt $max_retries ]; do + cluster_status=$(aws es describe-elasticsearch-domain --domain-name "$new_cluster_name" | jq -r '.DomainStatus.Processing') + if [ "$cluster_status" != "true" ]; then + echo "New cluster is available." + break + fi + sleep $sleep_duration + ((retry_count++)) + done + + if [ $retry_count -eq $max_retries ]; then + echo "New cluster creation may still be in progress. Please check the AWS Management Console for the status." + fi +fi \ No newline at end of file From a35d14df23ec4f06b73e8b544e03261dfd978e99 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 18 Oct 2023 08:50:35 -0600 Subject: [PATCH 224/362] Argo Changes to Allow Workflow Runs From All NS (#2360) * making changes to allow argo workflows to run from any namespace. In this case, we need this functionality to be able to run the "usersync" argo workflow job and usersync must run in the namespace in question and not in the "argo" namespace. * adding in the ability to pass in a flag to modify the trust policy and allow for multiple namespace to use a role. * modifying the awsrole script to ensure the "flag" gets passed in properly to all functions. 
Also, adding in the "--flag" option in the kube-setup-argo script and removing the rolebinding command for the argo SA as it is no longer needed. * adding the workflow template and workflow cron for the fence usersync job. * fixing a syntax error in awsrole and remove default namespace logic from kube-setup-argo as roles are cluster based now. * removing this line as it is repetitive * removing the service account name * granting default sa permissions in each namespace * making changes to grant the "argo" service account permissions in other namespaces while still allowing the "default" service account admin access in the argo namespace. * adding the "argo" service account to the usersync workflow templates as usersync must be run in the same namespace as fence. * fixing syntax error * fixing syntax error * modifying the flag that is used to pass the "all_namespaces" var and adding a line to create the argo sa * explicitly setting "flag" for troubleshooting and modifying the all namespaces trust policy so the same key is not used.
* testing a different method * correcting the multiple namespace policy * correcting the rolebinding for the default sa in the argo namespace * changing the schedule for fence usersync cron and changing the "kind" to "cronworkflow" --- .secrets.baseline | 2 +- gen3/bin/awsrole.sh | 61 ++++- gen3/bin/kube-setup-argo.sh | 107 +++----- .../argo/workflows/fence-usersync-cron.yaml | 10 + .../argo/workflows/fence-usersync-wf.yaml | 257 ++++++++++++++++++ 5 files changed, 367 insertions(+), 70 deletions(-) create mode 100644 kube/services/argo/workflows/fence-usersync-cron.yaml create mode 100644 kube/services/argo/workflows/fence-usersync-wf.yaml diff --git a/.secrets.baseline b/.secrets.baseline index 16475e0b2..919833990 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -342,7 +342,7 @@ "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", "is_secret": false, "is_verified": false, - "line_number": 217, + "line_number": 191, "type": "Secret Keyword" } ], diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index 476e7d003..144b7a4fe 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -20,6 +20,7 @@ gen3_awsrole_help() { # NOTE: service-account to role is 1 to 1 # # @param serviceAccount to link to the role +# @param flag (optional) - specify a flag to use a different trust policy # function gen3_awsrole_ar_policy() { local serviceAccount="$1" @@ -32,6 +33,9 @@ function gen3_awsrole_ar_policy() { local issuer_url local account_id local vpc_name + shift || return 1 + local flag=$1 + vpc_name="$(gen3 api environment)" || return 1 issuer_url="$(aws eks describe-cluster \ --name ${vpc_name} \ @@ -42,7 +46,42 @@ function gen3_awsrole_ar_policy() { local provider_arn="arn:aws:iam::${account_id}:oidc-provider/${issuer_url}" - cat - < config.tfvars @@ -199,6 +247,13 @@ EOF gen3_log_err $errMsg return 1 fi + shift || return 1 + local flag="" + # Check if the "all_namespaces" flag is provided + if [[ "$1" == "-f" || "$1" == "--flag" ]]; then + flag="$2" 
+ shift 2 + fi # check if the name is already used by another entity local entity_type @@ -216,7 +271,7 @@ EOF fi TF_IN_AUTOMATION="true" - if ! _tfplan_role $rolename $saName $namespace; then + if ! _tfplan_role $rolename $saName $namespace -f $flag; then return 1 fi if ! _tfapply_role $rolename; then diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index c7243d3da..ff2438833 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -28,7 +28,10 @@ function setup_argo_buckets { # try to come up with a unique but composable bucket name bucketName="gen3-argo-${accountNumber}-${environment//_/-}" - userName="gen3-argo-${environment//_/-}-user" + nameSpace="$(gen3 db namespace)" + roleName="gen3-argo-${environment//_/-}-role" + bucketPolicy="argo-bucket-policy-${nameSpace}" + internalBucketPolicy="argo-internal-bucket-policy-${nameSpace}" if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" @@ -114,70 +117,41 @@ EOF ] } EOF - if ! secret="$(g3kubectl get secret argo-s3-creds -n argo 2> /dev/null)"; then - gen3_log_info "setting up bucket $bucketName" - - if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then - gen3_log_info "${bucketName} s3 bucket already exists" - # continue on ... - elif ! aws s3 mb "s3://${bucketName}"; then - gen3_log_err "failed to create bucket ${bucketName}" - fi - - gen3_log_info "Creating IAM user ${userName}" - if ! aws iam get-user --user-name ${userName} > /dev/null 2>&1; then - aws iam create-user --user-name ${userName} || true - else - gen3_log_info "IAM user ${userName} already exits.." - fi - - secret=$(aws iam create-access-key --user-name ${userName}) - if ! 
g3kubectl get namespace argo > /dev/null 2>&1; then - gen3_log_info "Creating argo namespace" - g3kubectl create namespace argo || true - g3kubectl label namespace argo app=argo || true - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo || true - fi - else - # Else we want to recreate the argo-s3-creds secret so make a temp file with the current creds and delete argo-s3-creds secret - gen3_log_info "Argo S3 setup already completed" - local secretFile="$XDG_RUNTIME_DIR/temp_key_file_$$.json" - cat > "$secretFile" < /dev/null 2>&1; then + gen3_log_info "${bucketName} s3 bucket already exists" + # continue on ... + elif ! aws s3 mb "s3://${bucketName}"; then + gen3_log_err "failed to create bucket ${bucketName}" fi - - gen3_log_info "Creating s3 creds secret in argo namespace" - if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then - if [[ -z $internalBucketName ]]; then - g3kubectl delete secret -n argo argo-s3-creds || true - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - else - g3kubectl delete secret -n argo argo-s3-creds || true - g3kubectl create secret -n argo generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} || true - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r 
.AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - fi + if ! g3kubectl get namespace argo > /dev/null 2>&1; then + gen3_log_info "Creating argo namespace" + g3kubectl create namespace argo || true + g3kubectl label namespace argo app=argo || true + # Grant admin access within the argo namespace to the default SA in the argo namespace + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo || true + fi + gen3_log_info "Creating IAM role ${roleName}" + if aws iam get-role --role-name "${roleName}" > /dev/null 2>&1; then + gen3_log_info "IAM role ${roleName} already exists.." + roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) + gen3_log_info "Role annotate" + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n argo + g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} -n $nameSpace else - g3kubectl create sa argo || true - # Grant admin access within the current namespace to the argo SA in the current namespace - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$(gen3 db namespace):argo -n $(gen3 db namespace) || true - aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile || true - if [[ -z $internalBucketName ]]; then - aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile || true - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r .AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} || true - else - g3kubectl create secret generic argo-s3-creds --from-literal=AccessKeyId=$(echo $secret | jq -r 
.AccessKey.AccessKeyId) --from-literal=SecretAccessKey=$(echo $secret | jq -r .AccessKey.SecretAccessKey) --from-literal=bucketname=${bucketName} --from-literal=internalbucketname=${internalBucketName} || true - - fi + gen3 awsrole create $roleName argo $nameSpace -f all_namespaces + roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n argo fi + # Grant admin access within the current namespace to the argo SA in the current namespace + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=$nameSpace:argo -n $nameSpace || true + aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile || true + if [[ -z $internalBucketName ]]; then + aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile || true + fi ## if new bucket then do the following # Get the aws keys from secret @@ -189,9 +163,9 @@ EOF aws s3api put-bucket-lifecycle --bucket ${bucketName} --lifecycle-configuration file://$bucketLifecyclePolicyFile # Always update the policy, in case manifest buckets change - aws iam put-user-policy --user-name ${userName} --policy-name argo-bucket-policy --policy-document file://$policyFile + aws iam put-role-policy --role-name ${roleName} --policy-name ${bucketPolicy} --policy-document file://$policyFile if [[ ! -z $internalBucketPolicyFile ]]; then - aws iam put-user-policy --user-name ${userName} --policy-name argo-internal-bucket-policy --policy-document file://$internalBucketPolicyFile + aws iam put-role-policy --role-name ${roleName} --policy-name ${internalBucketPolicy} --policy-document file://$internalBucketPolicyFile fi if [[ ! -z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! 
-z $(g3k_config_lookup '.argo.indexd_admin_user') ]]; then if [[ ! -z $(g3k_config_lookup '.indexd_admin_user' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then @@ -231,11 +205,12 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then if (! helm status argo -n argo > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then DBHOST=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_host | base64 -d) DBNAME=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_database | base64 -d) - if [[ -z $(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d) ]]; then - BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.bucketname | base64 -d) + if [[ -z $internalBucketName ]]; then + BUCKET=$bucketName else - BUCKET=$(kubectl get secrets -n argo argo-s3-creds -o json | jq -r .data.internalbucketname | base64 -d) + BUCKET=$internalBucketName fi + valuesFile="$XDG_RUNTIME_DIR/values_$$.yaml" valuesTemplate="${GEN3_HOME}/kube/services/argo/values.yaml" diff --git a/kube/services/argo/workflows/fence-usersync-cron.yaml b/kube/services/argo/workflows/fence-usersync-cron.yaml new file mode 100644 index 000000000..4723ce10f --- /dev/null +++ b/kube/services/argo/workflows/fence-usersync-cron.yaml @@ -0,0 +1,10 @@ +apiVersion: argoproj.io/v1alpha1 +kind: CronWorkflow +metadata: + name: fence-usersync-cron +spec: + serviceAccountName: argo + schedule: "*/30 * * * *" + workflowSpec: + workflowTemplateRef: + name: fence-usersync-workflow diff --git a/kube/services/argo/workflows/fence-usersync-wf.yaml b/kube/services/argo/workflows/fence-usersync-wf.yaml new file mode 100644 index 000000000..d7f56a2ce --- /dev/null +++ b/kube/services/argo/workflows/fence-usersync-wf.yaml @@ -0,0 +1,257 @@ +apiVersion: argoproj.io/v1alpha1 +kind: WorkflowTemplate +metadata: + name: fence-usersync-workflow +spec: + volumeClaimTemplates: + - metadata: + name: shared-data + 
spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi + serviceAccountName: argo + entrypoint: fence-usersync + arguments: + parameters: + - name: ADD_DBGAP + value: "false" + - name: ONLY_DBGAP + value: "false" + templates: + - name: fence-usersync + steps: + - - name: wait-for-fence + template: wait-for-fence + - - name: awshelper + template: awshelper + - - name: usersyncer + template: usersyncer + + - name: wait-for-fence + container: + image: curlimages/curl:latest + command: ["/bin/sh","-c"] + args: ["while [ $(curl -sw '%{http_code}' http://fence-service -o /dev/null) -ne 200 ]; do sleep 5; echo 'Waiting for fence...'; done"] + + - name: awshelper + container: + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + securityContext: + runAsUser: 0 + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: hostname + - name: userYamlS3Path + valueFrom: + configMapKeyRef: + name: manifest-global + key: useryaml_s3path + - name: slackWebHook + value: None + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: + - "-c" + - | + GEN3_HOME=/home/ubuntu/cloud-automation + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + + if [ "${userYamlS3Path}" = 'none' ]; then + # echo "using local user.yaml" + # cp /var/www/fence/user.yaml /mnt/shared/user.yaml + echo "s3 yaml not provided - bailing out" + exit 1 + else + # ----------------- + echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml" + n=0 + until [ $n -ge 5 ]; do + echo "Download attempt $n" + aws s3 cp "${userYamlS3Path}" /mnt/shared/user.yaml && break + n=$[$n+1] + sleep 2 + done + fi + if [[ ! -f /mnt/shared/user.yaml ]]; then + echo "awshelper failed to retrieve /mnt/shared/user.yaml" + exit 1 + fi + #----------- + echo "awshelper updating etl configmap" + if ! 
gen3 gitops etl-convert < /mnt/shared/user.yaml > /tmp/user.yaml; then + echo "ERROR: failed to generate ETL config" + exit 1 + fi + # kubectl delete configmap fence > /dev/null 2>&1 + # kubectl create configmap fence --from-file=/tmp/user.yaml + if [ "${slackWebHook}" != 'None' ]; then + curl -X POST --data-urlencode "payload={\"text\": \"AWSHelper: Syncing users on ${gen3Env}\"}" "${slackWebHook}" + fi + echo "Helper exit ok" + + - name: usersyncer + volumes: + - name: yaml-merge + configMap: + name: "fence-yaml-merge" + - name: config-volume + secret: + secretName: "fence-config" + - name: creds-volume + secret: + secretName: "fence-creds" + - name: fence-google-app-creds-secret-volume + secret: + secretName: "fence-google-app-creds-secret" + - name: fence-google-storage-creds-secret-volume + secret: + secretName: "fence-google-storage-creds-secret" + - name: fence-ssh-keys + secret: + secretName: "fence-ssh-keys" + defaultMode: 0400 + - name: fence-sshconfig + configMap: + name: "fence-sshconfig" + - name: projects + configMap: + name: "projects" + container: + image: quay.io/cdis/fence:master + imagePullPolicy: Always + env: + - name: PYTHONPATH + value: /var/www/fence + - name: SYNC_FROM_DBGAP + valueFrom: + configMapKeyRef: + name: manifest-global + key: sync_from_dbgap + - name: ADD_DBGAP + value: "{{workflow.parameters.ADD_DBGAP}}" + - name: ONLY_DBGAP + value: "{{workflow.parameters.ONLY_DBGAP}}" + - name: SLACK_SEND_DBGAP + valueFrom: + configMapKeyRef: + name: manifest-global + key: slack_send_dbgap + optional: true + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + optional: true + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: hostname + - name: FENCE_PUBLIC_CONFIG + valueFrom: + configMapKeyRef: + name: manifest-fence + key: fence-config-public.yaml + optional: true + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + - name: "config-volume" + readOnly: true + mountPath: 
"/var/www/fence/fence-config.yaml" + subPath: fence-config.yaml + - name: "creds-volume" + readOnly: true + mountPath: "/var/www/fence/creds.json" + - name: "yaml-merge" + readOnly: true + mountPath: "/var/www/fence/yaml_merge.py" + - name: "fence-google-app-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_app_creds_secret.json" + subPath: fence_google_app_creds_secret.json + - name: "fence-google-storage-creds-secret-volume" + readOnly: true + mountPath: "/var/www/fence/fence_google_storage_creds_secret.json" + subPath: fence_google_storage_creds_secret.json + - name: "fence-ssh-keys" + mountPath: "/root/.ssh/id_rsa" + subPath: "id_rsa" + - name: "fence-ssh-keys" + mountPath: "/root/.ssh/id_rsa.pub" + subPath: "id_rsa.pub" + - name: "fence-sshconfig" + mountPath: "/root/.ssh/config" + subPath: "config" + - name: "projects" + mountPath: "/var/www/fence/projects.yaml" + subPath: "projects.yaml" + command: ["/bin/bash"] + args: + - "-c" + # Script always succeeds if it runs (echo exits with 0) + - | + echo "${ADD_DBGAP}" + echo "${ONLY_DBGAP}" + echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" + python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml + echo 'options use-vc' >> /etc/resolv.conf + let count=0 + while [[ ! -f /mnt/shared/user.yaml && $count -lt 50 ]]; do + echo "fence container waiting for /mnt/shared/user.yaml"; + sleep 2 + let count=$count+1 + done + if [[ "$SYNC_FROM_DBGAP" != True && "$ADD_DBGAP" != "true" ]]; then + if [[ -f /mnt/shared/user.yaml ]]; then + echo "running fence-create" + time fence-create sync --arborist http://arborist-service --yaml /mnt/shared/user.yaml + else + echo "/mnt/shared/user.yaml did not appear within timeout :-(" + false # non-zero exit code + fi + exitcode=$? 
+ else + output=$(mktemp "/tmp/fence-create-output_XXXXXX") + if [[ -f /mnt/shared/user.yaml && "$ONLY_DBGAP" != "true" ]]; then + echo "Running fence-create dbgap-sync with user.yaml - see $output" + time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml --yaml /mnt/shared/user.yaml 2>&1 | tee "$output" + else + echo "Running fence-create dbgap-sync without user.yaml - see $output" + time fence-create sync --arborist http://arborist-service --sync_from_dbgap "True" --projects /var/www/fence/projects.yaml 2>&1 | tee "$output" + fi + exitcode="${PIPESTATUS[0]}" + echo "$output" + # Echo what files we are seeing on dbgap ftp to Slack + # We only do this step every 12 hours and not on weekends to reduce noise + if [[ -n "$SLACK_SEND_DBGAP" && "$SLACK_SEND_DBGAP" = True ]]; then + files=$(grep "Reading file" "$output") + let hour=$(date -u +10#%H) + let dow=$(date -u +10#%u) + if ! (( hour % 12 )) && (( dow < 6 )); then + if [ "${slackWebHook}" != 'None' ]; then + curl -X POST --data-urlencode "payload={\"text\": \"FenceHelper: \n\`\`\`\n${files}\n\`\`\`\"}" "${slackWebHook}" + fi + fi + fi + fi + if [[ $exitcode -ne 0 && "${slackWebHook}" != 'None' ]]; then + emptyfile=$(grep "EnvironmentError:" "$output") + if [ ! 
-z "$emptyfile" ]; then + curl -X POST --data-urlencode "payload={\"text\": \"JOBSKIPPED: User sync skipped on ${gen3Env} ${emptyfile}\"}" "${slackWebHook}"; + else + curl -X POST --data-urlencode "payload={\"text\": \"JOBFAIL: User sync failed on ${gen3Env}\"}" "${slackWebHook}" + fi + fi + echo "Exit code: $exitcode" + exit "$exitcode" \ No newline at end of file From 58bc19584e0b5bd3375275b1125f700f4a38d08d Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Thu, 19 Oct 2023 08:25:34 -0700 Subject: [PATCH 225/362] HP-1244 Chore/create cron job for cedar (#2387) * (HP-1244): update cedar script to use fence-client credentials * (HP-1244): update cedar job to get parameters from g3auto secrets * (HP-1233): remove comment line * (HP-1244): store cedar_client creds in environment variable * (HP-1244): update argument names passed to python script --- .../healdata/heal-cedar-data-ingest.py | 33 ++- kube/services/jobs/cedar-ingestion-job.yaml | 224 +++++++----------- 2 files changed, 114 insertions(+), 143 deletions(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index e6634a70a..e95ab8604 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -91,10 +91,29 @@ def update_filter_metadata(metadata_to_update): metadata_to_update["tags"] = tags return metadata_to_update + +def get_client_token(client_id: str, client_secret: str): + try: + token_url = f"http://revproxy-service/user/oauth2/token" + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + params = {'grant_type': 'client_credentials'} + data = 'scope=openid user data' + + token_result = requests.post( + token_url, params=params, headers=headers, data=data, + auth=(client_id, client_secret), + ) + token = token_result.json()["access_token"] + except: + raise Exception("Could not get token") + return token + + parser = 
argparse.ArgumentParser() parser.add_argument("--directory", help="CEDAR Directory ID for registering ") -parser.add_argument("--access_token", help="User access token") +parser.add_argument("--cedar_client_id", help="The CEDAR client id") +parser.add_argument("--cedar_client_secret", help="The CEDAR client secret") parser.add_argument("--hostname", help="Hostname") @@ -103,17 +122,23 @@ def update_filter_metadata(metadata_to_update): if not args.directory: print("Directory ID is required!") sys.exit(1) -if not args.access_token: - print("User access token is required!") +if not args.cedar_client_id: + print("CEDAR client id is required!") + sys.exit(1) +if not args.cedar_client_secret: + print("CEDAR client secret is required!") sys.exit(1) if not args.hostname: print("Hostname is required!") sys.exit(1) dir_id = args.directory -access_token = args.access_token +client_id = args.cedar_client_id +client_secret = args.cedar_client_secret hostname = args.hostname +print("Getting CEDAR client access token") +access_token = get_client_token(client_id, client_secret) token_header = {"Authorization": 'bearer ' + access_token} limit = 10 diff --git a/kube/services/jobs/cedar-ingestion-job.yaml b/kube/services/jobs/cedar-ingestion-job.yaml index ecc83335c..f6be4dd23 100644 --- a/kube/services/jobs/cedar-ingestion-job.yaml +++ b/kube/services/jobs/cedar-ingestion-job.yaml @@ -1,19 +1,22 @@ # # run with: -# gen3 job run cedar-ingestion \ -# SUBMISSION_USER $submission_user \ -# CEDAR_DIRECTORY_ID $cedar_directory_id \ -# -# SUBMISSION_USER(optional) -# e-mail of user-account to submit the data to MDS, must have MDS admin and CEDAR polices granted. Default: "cdis.autotest@gmail.com" +# gen3 job run cedar-ingestion [CEDAR_DIRECTORY_ID $cedar_directory_id] # # CEDAR_DIRECTORY_ID -# ID of CEDAR directory where instances will be pulled from, only needs its UUID part. 
For example: "123e4567-e89b-12d3-a456-426614174000" +# The directory id will be read from 'directory_id.txt' in the +# 'cedar-g3auto' secret. +# You can override the secret value with an optional command line argument. +# # The deployed CEDAR wrapper services must be able to read from this directory. # -# Example -# gen3 job run cedar-ingestion CEDAR_DIRECTORY_ID 123e4567-e89b-12d3-a456-426614174000 SUBMISSION_USER cdis.autotest@gmail.com +# ACCESS TOKENS +# Access tokens will be generated for an existing fence-client, cedar_ingest_client. +# The client_id and client_secret will be read from +# 'cedar_client_credentials.json' in the 'cedar-g3auto' secret. +# +# The fence-client must have MDS admin and CEDAR polices granted. # + apiVersion: batch/v1 kind: Job metadata: @@ -44,36 +47,13 @@ spec: - ONDEMAND serviceAccountName: useryaml-job volumes: - - name: yaml-merge - configMap: - name: "fence-yaml-merge" - name: shared-data emptyDir: {} -# ----------------------------------------------------------------------------- -# DEPRECATED! Remove when all commons are no longer using local_settings.py -# for fence. 
-# ----------------------------------------------------------------------------- - - name: old-config-volume - secret: - secretName: "fence-secret" - - name: creds-volume - secret: - secretName: "fence-creds" - - name: config-helper - configMap: - name: config-helper - - name: json-secret-volume + - name: cedar-client-volume-g3auto secret: - secretName: "fence-json-secret" -# ----------------------------------------------------------------------------- - - name: config-volume - secret: - secretName: "fence-config" - - name: fence-jwt-keys - secret: - secretName: "fence-jwt-keys" - containers: - - name: awshelper + secretName: cedar-g3auto # the secret name in kube + initContainers: + - name: cedar image: quay.io/cdis/awshelper:master imagePullPolicy: Always ports: @@ -84,10 +64,18 @@ spec: configMapKeyRef: name: global key: hostname - - name: SUBMISSION_USER - GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-| - name: CEDAR_DIRECTORY_ID GEN3_CEDAR_DIRECTORY_ID|-value: ""-| + - name: CEDAR_DIRECTORY_ID_SECRET + valueFrom: + secretKeyRef: + name: cedar-g3auto + key: "directory_id.txt" + - name: CEDAR_CLIENT_CREDENTIALS + valueFrom: + secretKeyRef: + name: cedar-g3auto + key: "cedar_client_credentials.json" volumeMounts: - name: shared-data mountPath: /mnt/shared @@ -95,117 +83,75 @@ spec: limits: cpu: 1 memory: 5Gi + command: ["/bin/bash" ] args: - "-c" - | if [[ -z "$CEDAR_DIRECTORY_ID" ]]; then - echo -e "CEDAR_DIRECTORY_ID is required" 1>&2 - exit 1 + if [[ ! -z "$CEDAR_DIRECTORY_ID_SECRET" ]]; then + echo "CEDAR_DIRECTORY_ID is from g3auto secret" + export CEDAR_DIRECTORY_ID=$CEDAR_DIRECTORY_ID_SECRET + else + echo -e "ERROR: CEDAR_DIRECTORY_ID must be in secret or on command line" 1>&2 + exit 0 + fi + else + echo "CEDAR_DIRECTORY_ID is from command line parameter" + fi + + if [[ ! 
-z "$CEDAR_CLIENT_CREDENTIALS" ]]; then + export CEDAR_CLIENT_ID=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_id) + export CEDAR_CLIENT_SECRET=$(echo $CEDAR_CLIENT_CREDENTIALS | jq -r .client_secret) + else + echo -e "Could not read cedar-client credentials" 1>&2 + exit 0 fi - let count=0 - while [[ ! -f /mnt/shared/access_token.txt && $count -lt 50 ]]; do - echo "Waiting for /mnt/shared/access_token.txt"; - sleep 2 - let count=$count+1 - done + pip install pydash export GEN3_HOME="$HOME/cloud-automation" - export ACCESS_TOKEN="$(cat /mnt/shared/access_token.txt)" - python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --access_token $ACCESS_TOKEN --directory $CEDAR_DIRECTORY_ID --hostname $HOSTNAME - echo "All done - exit status $?" - - name: fence - GEN3_FENCE_IMAGE - imagePullPolicy: Always - env: - - name: PYTHONPATH - value: /var/www/fence - - name: SUBMISSION_USER - GEN3_SUBMISSION_USER|-value: "cdis.autotest@gmail.com"-| - - name: TOKEN_EXPIRATION - value: "3600" - - name: FENCE_PUBLIC_CONFIG - valueFrom: - configMapKeyRef: - name: manifest-fence - key: fence-config-public.yaml - optional: true - volumeMounts: -# ----------------------------------------------------------------------------- -# DEPRECATED! Remove when all commons are no longer using local_settings.py -# for fence. 
-# ----------------------------------------------------------------------------- - - name: "old-config-volume" - readOnly: true - mountPath: "/var/www/fence/local_settings.py" - subPath: local_settings.py - - name: "creds-volume" - readOnly: true - mountPath: "/var/www/fence/creds.json" - subPath: creds.json - - name: "config-helper" - readOnly: true - mountPath: "/var/www/fence/config_helper.py" - subPath: config_helper.py - - name: "json-secret-volume" - readOnly: true - mountPath: "/var/www/fence/fence_credentials.json" - subPath: fence_credentials.json -# ----------------------------------------------------------------------------- - - name: "config-volume" - readOnly: true - mountPath: "/var/www/fence/fence-config-secret.yaml" - subPath: fence-config.yaml - - name: "yaml-merge" - readOnly: true - mountPath: "/var/www/fence/yaml_merge.py" - subPath: yaml_merge.py - - name: "fence-jwt-keys" - readOnly: true - mountPath: "/fence/jwt-keys.tar" - subPath: "jwt-keys.tar" - - name: shared-data - mountPath: /mnt/shared - command: ["/bin/bash" ] - args: + python ${GEN3_HOME}/files/scripts/healdata/heal-cedar-data-ingest.py --directory $CEDAR_DIRECTORY_ID --cedar_client_id $CEDAR_CLIENT_ID --cedar_client_secret $CEDAR_CLIENT_SECRET --hostname $HOSTNAME + status=$? 
+ if [[ $status -ne 0 ]]; then + echo "WARNING: non zero exit code: $status" + else + echo "All done - exit code: $status" + touch /mnt/shared/success + fi + containers: + - name: awshelper + env: + - name: slackWebHook + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + - name: gen3Env + valueFrom: + configMapKeyRef: + name: manifest-global + key: hostname + GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| + volumeMounts: + - name: shared-data + mountPath: /mnt/shared + command: ["/bin/bash"] + args: - "-c" - | - echo "${FENCE_PUBLIC_CONFIG:-""}" > "/var/www/fence/fence-config-public.yaml" - python /var/www/fence/yaml_merge.py /var/www/fence/fence-config-public.yaml /var/www/fence/fence-config-secret.yaml > /var/www/fence/fence-config.yaml - if [ -f /fence/jwt-keys.tar ]; then - cd /fence - tar xvf jwt-keys.tar - if [ -d jwt-keys ]; then - mkdir -p keys - mv jwt-keys/* keys/ - fi + if [[ ! "$slackWebHook" =~ ^http ]]; then + echo "Slack webhook not set" + exit 0 fi - echo "generate access token" - echo "fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION" - tempFile="$(mktemp -p /tmp token.txt_XXXXXX)" - success=false - count=0 - sleepTime=10 - # retry loop - while [[ $count -lt 3 && $success == false ]]; do - if fence-create --path fence token-create --type access_token --username $SUBMISSION_USER --scopes openid,user,test-client --exp $TOKEN_EXPIRATION > "$tempFile"; then - echo "fence-create success!" - tail -1 "$tempFile" > /mnt/shared/access_token.txt - # base64 --decode complains about invalid characters - don't know why - awk -F . '{ print $2 }' /mnt/shared/access_token.txt | base64 --decode 2> /dev/null - success=true - else - echo "fence-create failed!" 
- cat "$tempFile" - echo "sleep for $sleepTime, then retry" - sleep "$sleepTime" - let sleepTime=$sleepTime+$sleepTime - fi - let count=$count+1 - done - if [[ $success != true ]]; then - echo "Giving up on fence-create after $count retries - failed to create valid access token" + if ! [ -f /mnt/shared/success ]; then + success="FAILED" + color="ff0000" + else + success="SUCCESS" + color="2EB67D" fi - echo "" - echo "All Done - always succeed to avoid k8s retries" + echo "Sending ${success} message to slack..." + payload="{\"attachments\": [{\"fallback\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"color\": \"#${color}\",\"title\": \"JOB ${success}: cedar-ingest cronjob on ${gen3Env}\",\"text\": \"Pod name: ${HOSTNAME}\",\"ts\": \"$(date +%s)\"}]}" + echo "Payload=${payload}" + curl -X POST --data-urlencode "payload=${payload}" "${slackWebHook}" restartPolicy: Never From 9fc74f3273e7118d15885fe433bd3b40fe413646 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 23 Oct 2023 15:39:56 -0500 Subject: [PATCH 226/362] Update values.yaml (#2390) --- kube/services/datadog/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index c7a56035c..ed592abf7 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -235,7 +235,7 @@ datadog: # timeout: 5 containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export* name:job-task" - + containerExclude: "kube_namespace:logging kube_namespace:kube-system kube_namespace:kubecost kube_namespace:argo kube_namespace:cortex-xdr kube_namespace:monitoring" ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics From 8cc37a319b751647038df82b3c392afbfcd77690 Mon Sep 
17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 23 Oct 2023 16:52:52 -0500 Subject: [PATCH 227/362] Update values.yaml --- kube/services/datadog/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/datadog/values.yaml b/kube/services/datadog/values.yaml index ed592abf7..fc0bbab8b 100644 --- a/kube/services/datadog/values.yaml +++ b/kube/services/datadog/values.yaml @@ -235,7 +235,7 @@ datadog: # timeout: 5 containerExcludeLogs: "kube_namespace:logging kube_namespace:argo name:pelican-export* name:job-task" - containerExclude: "kube_namespace:logging kube_namespace:kube-system kube_namespace:kubecost kube_namespace:argo kube_namespace:cortex-xdr kube_namespace:monitoring" + containerExclude: "kube_namespace:logging kube_namespace:kube-system kube_namespace:kubecost kube_namespace:argo kube_namespace:cortex-xdr kube_namespace:monitoring kube_namespace:datadog" ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics From 1e1a9b70387c21bbcd82e958e69f06a7e688c395 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Fri, 27 Oct 2023 09:28:38 -0500 Subject: [PATCH 228/362] Jenkins dockerfiles improvements (#2398) --- .github/workflows/image_build_push.yaml | 8 ++++---- .github/workflows/image_build_push_jenkins.yaml | 11 ++++++----- .github/workflows/image_build_push_squid.yaml | 5 +++-- .secrets.baseline | 4 ++-- Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 7 +++---- Docker/jenkins/Jenkins-Worker/Dockerfile | 6 +----- kube/services/jobs/usersync-job.yaml | 2 +- 7 files changed, 20 insertions(+), 23 deletions(-) diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml index 51543f0fe..d5bfea351 100644 --- 
a/.github/workflows/image_build_push.yaml +++ b/.github/workflows/image_build_push.yaml @@ -1,10 +1,10 @@ -name: Build Python Base Images and Push to Quay and ECR +name: Build Python Base Images on: push jobs: python_3-9: - name: Python 3.9 Build and Push + name: Python 3.9 uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile" @@ -17,7 +17,7 @@ jobs: QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} python_3-10: - name: Python 3.10 Build and Push + name: Python 3.10 uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile" @@ -30,7 +30,7 @@ jobs: QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} awshelper: - name: AwsHelper Build and Push + name: AwsHelper uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile" diff --git a/.github/workflows/image_build_push_jenkins.yaml b/.github/workflows/image_build_push_jenkins.yaml index 2d85aedf1..094417fe5 100644 --- a/.github/workflows/image_build_push_jenkins.yaml +++ b/.github/workflows/image_build_push_jenkins.yaml @@ -1,13 +1,14 @@ -name: Build Jenkins images and push to Quay +name: Build Jenkins images on: push: paths: + - .github/workflows/image_build_push_jenkins.yaml - Docker/jenkins/** jobs: jenkins: - name: Jenkins Build and Push + name: Jenkins uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins/Dockerfile" @@ -21,7 +22,7 @@ jobs: QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} jenkins2: - name: Jenkins2 Build and Push + name: Jenkins2 uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: 
"./Docker/jenkins/Jenkins2/Dockerfile" @@ -35,7 +36,7 @@ jobs: QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} jenkins-ci-worker: - name: Jenkins-CI-Worker Build and Push + name: Jenkins-CI-Worker uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-CI-Worker/Dockerfile" @@ -49,7 +50,7 @@ jobs: QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} jenkins-qa-worker: - name: Jenkins-QA-Worker Build and Push + name: Jenkins-QA-Worker uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/jenkins/Jenkins-Worker/Dockerfile" diff --git a/.github/workflows/image_build_push_squid.yaml b/.github/workflows/image_build_push_squid.yaml index 2849f0cc5..ce1761d3c 100644 --- a/.github/workflows/image_build_push_squid.yaml +++ b/.github/workflows/image_build_push_squid.yaml @@ -1,13 +1,14 @@ -name: Build Squid images and push to Quay +name: Build Squid images on: push: paths: + - .github/workflows/image_build_push_squid.yaml - Docker/squid/** jobs: squid: - name: Squid Build and Push + name: Squid image uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/squid/Dockerfile" diff --git a/.secrets.baseline b/.secrets.baseline index 919833990..0a8fe9cc9 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-09-18T18:49:22Z", + "generated_at": "2023-10-26T21:32:44Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -79,7 +79,7 @@ "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", "is_secret": false, "is_verified": false, - "line_number": 122, + "line_number": 121, "type": "Secret Keyword" } ], diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 40fd08fa3..f0da68f69 100644 --- 
a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -34,11 +34,10 @@ RUN set -xe && apt-get update \ zlib1g-dev \ zsh \ ca-certificates-java \ - openjdk-11-jre-headless \ && ln -s /usr/bin/lua5.3 /usr/local/bin/lua # Use jdk11 -ENV JAVA_HOME="/usr/lib/jvm/java-11-openjdk-amd64" +ENV JAVA_HOME="/opt/java/openjdk" ENV PATH="$JAVA_HOME/bin:$PATH" COPY ./certfix.sh /certfix.sh @@ -75,7 +74,7 @@ RUN sudo install -m 0755 -d /etc/apt/keyrings \ # install nodejs RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - -RUN apt-get update && apt-get install -y nodejs +RUN apt-get update && apt-get install -y nodejs npm # Install postgres 13 client RUN curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc| gpg --dearmor -o /etc/apt/trusted.gpg.d/postgresql.gpg && \ @@ -98,7 +97,7 @@ RUN sed -i 's/python3/python3.8/' /usr/bin/lsb_release && \ sed -i 's/python3/python3.8/' /usr/bin/add-apt-repository # install aws cli, poetry, pytest, etc. -RUN set -xe && python3.8 -m pip install --upgrade pip && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade +RUN set -xe && python3.8 -m pip install --upgrade pip setuptools && python3.8 -m pip install awscli --upgrade && python3.8 -m pip install pytest --upgrade && python3.8 -m pip install poetry && python3.8 -m pip install PyYAML --upgrade && python3.8 -m pip install lxml --upgrade && python3.8 -m pip install yq --upgrade && python3.8 -m pip install datadog --upgrade # install terraform RUN curl -o /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip \ diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index c31e54923..c824690de 100644 --- 
a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -8,6 +8,7 @@ RUN set -xe && apt-get update && apt-get install -y apt-utils dnsutils build-ess RUN apt-get update \ && apt-get install -y lsb-release \ + git \ apt-transport-https \ r-base \ libffi-dev \ @@ -36,11 +37,6 @@ RUN apt-get update \ # install Ruby. RUN apt-get install -y ruby-full -# install GIT from buster-backports -RUN echo "deb http://deb.debian.org/debian buster-backports main" > /etc/apt/sources.list.d/buster-backports.list \ - && apt-get update \ - && apt-get -t=buster-backports -y install git=1:2.30.* - # # install docker tools: # diff --git a/kube/services/jobs/usersync-job.yaml b/kube/services/jobs/usersync-job.yaml index 8f148a3b0..8a5471a20 100644 --- a/kube/services/jobs/usersync-job.yaml +++ b/kube/services/jobs/usersync-job.yaml @@ -260,7 +260,7 @@ spec: exit 1 fi #----------------- - echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/useryaml"; + echo "awshelper downloading ${userYamlS3Path} to /mnt/shared/user.yaml"; n=0 until [ $n -ge 5 ]; do echo "Download attempt $n" From 3ff37b9775bb8d0d65c4303c4e6824072a9af9be Mon Sep 17 00:00:00 2001 From: pieterlukasse Date: Fri, 27 Oct 2023 21:05:42 +0200 Subject: [PATCH 229/362] Feat: add new config for integrating teamproject and arborist check (#2397) * feat: add new config for integrating teamproject and arborist check ...these properties are needed for enabling "teamproject" authorization mode in WebAPI and telling WebAPI where the Arborist endpoint is living for actually getting authorization information. 
* feat: add ARBORIST_URL as env var for WebAPI * fix: use ARBORIST_URL in config.yaml * feat: add default for ARBORIST_URL * revert default value for ARBORIST_URL ...the service will already default to arborist-service url if not defined --- kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml | 3 +++ kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml index 5cd46edd9..8eb01ec08 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-config.yaml @@ -55,6 +55,9 @@ stringData: security_oauth_callback_api: https://atlas.$hostname/WebAPI/user/oauth/callback security_oauth_callback_urlResolver: query + security_ohdsi_custom_authorization_mode: teamproject + security_ohdsi_custom_authorization_url: $ARBORIST_URL/auth/mapping + logging_level_root: info logging_level_org_ohdsi: info logging_level_org_apache_shiro: info diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 65d6ed38c..258aa8f87 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -59,6 +59,13 @@ spec: containers: - name: ohdsi-webapi GEN3_OHDSI-WEBAPI_IMAGE|-image: quay.io/cdis/ohdsi-webapi:latest-| + env: + - name: ARBORIST_URL + valueFrom: + configMapKeyRef: + name: manifest-global + key: arborist_url + optional: true livenessProbe: httpGet: path: /WebAPI/info/ From e2dc59262a8642c24d12d8501ba745798e5d2e8d Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 30 Oct 2023 11:24:23 -0600 Subject: [PATCH 230/362] Creating a "NodeNotReady" Alert (#2393) * changing the folder name to be plural and added a new node monitor to test if there is a node stuck in the "notready" state. 
* moving argo node-monitors into their own folder as we will only want to apply these monitors in environments with argo workflows. Also, edited the cron timing for the node-not-ready cron. * changing the "Application.spec.source.directory.exclude" to not be an array. * "exclude" does not support arrays, so reverting this change as it is also not necessary --- .../application.yaml | 2 +- .../argo-monitors/application.yaml | 22 ++++++++++ .../argo-monitors/argo-node-age.yaml} | 4 +- .../{node-monitor => node-monitors}/auth.yaml | 0 .../node-monitors/node-not-ready.yaml | 43 +++++++++++++++++++ 5 files changed, 68 insertions(+), 3 deletions(-) rename kube/services/{node-monitor => node-monitors}/application.yaml (92%) create mode 100644 kube/services/node-monitors/argo-monitors/application.yaml rename kube/services/{node-monitor/cronjob.yaml => node-monitors/argo-monitors/argo-node-age.yaml} (97%) rename kube/services/{node-monitor => node-monitors}/auth.yaml (100%) create mode 100644 kube/services/node-monitors/node-not-ready.yaml diff --git a/kube/services/node-monitor/application.yaml b/kube/services/node-monitors/application.yaml similarity index 92% rename from kube/services/node-monitor/application.yaml rename to kube/services/node-monitors/application.yaml index df41c34b9..0748f7c35 100644 --- a/kube/services/node-monitor/application.yaml +++ b/kube/services/node-monitors/application.yaml @@ -11,7 +11,7 @@ spec: source: repoURL: https://github.com/uc-cdis/cloud-automation.git targetRevision: master - path: kube/services/node-monitor + path: kube/services/node-monitors/ directory: exclude: "application.yaml" syncPolicy: diff --git a/kube/services/node-monitors/argo-monitors/application.yaml b/kube/services/node-monitors/argo-monitors/application.yaml new file mode 100644 index 000000000..fca4ace86 --- /dev/null +++ b/kube/services/node-monitors/argo-monitors/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + 
name: node-monitor-argo-application + namespace: argocd +spec: + destination: + namespace: default + server: https://kubernetes.default.svc + project: default + source: + repoURL: https://github.com/uc-cdis/cloud-automation.git + targetRevision: master + path: kube/services/node-monitors/argo-monitors/ + directory: + exclude: "application.yaml" + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/kube/services/node-monitor/cronjob.yaml b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml similarity index 97% rename from kube/services/node-monitor/cronjob.yaml rename to kube/services/node-monitors/argo-monitors/argo-node-age.yaml index e53046280..890495ee0 100644 --- a/kube/services/node-monitor/cronjob.yaml +++ b/kube/services/node-monitors/argo-monitors/argo-node-age.yaml @@ -1,7 +1,7 @@ apiVersion: batch/v1 kind: CronJob metadata: - name: node-monitor-cron + name: argo-node-age namespace: default spec: schedule: "*/5 * * * *" @@ -55,4 +55,4 @@ spec: curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODE_NAME}\` is older than 3 hours!\"}" $SLACK_WEBHOOK_URL fi done - restartPolicy: OnFailure + restartPolicy: OnFailure \ No newline at end of file diff --git a/kube/services/node-monitor/auth.yaml b/kube/services/node-monitors/auth.yaml similarity index 100% rename from kube/services/node-monitor/auth.yaml rename to kube/services/node-monitors/auth.yaml diff --git a/kube/services/node-monitors/node-not-ready.yaml b/kube/services/node-monitors/node-not-ready.yaml new file mode 100644 index 000000000..6626b5507 --- /dev/null +++ b/kube/services/node-monitors/node-not-ready.yaml @@ -0,0 +1,43 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: node-not-ready-cron + namespace: default +spec: + schedule: "*/30 * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: node-monitor + containers: + - name: 
kubectl + image: quay.io/cdis/awshelper + env: + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/sh + + # Get nodes that show "NodeStatusNeverUpdated" + NODES=$(kubectl get nodes -o json | jq -r '.items[] | select(.status.conditions[] | select(.type == "Ready" and .status == "Unknown")) | .metadata.name') + + if [ -n "$NODES" ]; then + echo "Nodes reporting 'NodeStatusNeverUpdated', sending an alert:" + echo "$NODES" + # Send alert to Slack + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODES}\` is stuck in "NotReady"!\"}" $SLACK_WEBHOOK_URL + else + echo "No nodes reporting 'NodeStatusNeverUpdated'" + fi + restartPolicy: OnFailure From 6d411f5ae7a47bdd74723948c786e0dd9e2696e7 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 31 Oct 2023 10:56:08 -0500 Subject: [PATCH 231/362] Update orthanc-service.conf (#2386) --- kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf index 2eb77b179..70d2918ea 100644 --- a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf @@ -10,6 +10,12 @@ location /orthanc/ { if ($request_uri ~ "^/orthanc/dicom-web/studies/") { set $authz_method "read"; set $authz_resource "/services/orthanc/studies"; + + if ($request_method = POST) { + return 403; + # set $authz_method "create"; + # set $authz_resource "/services/orthanc/studies"; + } } auth_request /gen3-authz; From ae02274ad52d09f95940b7757fdb0b78b7460d52 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Wed, 1 Nov 2023 09:15:02 -0500 Subject: [PATCH 232/362] Jenkins add "/usr/share/dict/words" (#2399) --- 
Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index f0da68f69..242d5e74d 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -116,6 +116,9 @@ RUN curl -sS -o - https://dl-ssl.google.com/linux/linux_signing_key.pub | apt-ke && apt-get -y update \ && apt-get -y install google-chrome-stable +# data-simulator needs "/usr/share/dict/words" to generate data that isn't random strings +RUN apt-get install --reinstall wamerican + # update /etc/sudoers RUN sed 's/^%sudo/#%sudo/' /etc/sudoers > /etc/sudoers.bak \ && /bin/echo -e "\n%sudo ALL=(ALL:ALL) NOPASSWD:ALL\n" >> /etc/sudoers.bak \ From 9f35d535bdcc3ff1d29054ecd39caec9e3e11d88 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Wed, 1 Nov 2023 14:15:15 -0500 Subject: [PATCH 233/362] fix: allow only data commons origin (#2391) --- kube/services/revproxy/nginx.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/revproxy/nginx.conf b/kube/services/revproxy/nginx.conf index 2e3a3b151..d0e14f49b 100644 --- a/kube/services/revproxy/nginx.conf +++ b/kube/services/revproxy/nginx.conf @@ -236,7 +236,7 @@ server { # This overrides the individual services # set $allow_origin "*"; - if ($http_origin) { + if ($http_origin = "https://$host") { set $allow_origin "$http_origin"; } From 358773b6a091a1e985cfc98a85cc04c028a92104 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 2 Nov 2023 09:22:33 -0600 Subject: [PATCH 234/362] Update mutate-guppy-config-for-guppy-test.sh (#2403) --- gen3/bin/mutate-guppy-config-for-guppy-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/mutate-guppy-config-for-guppy-test.sh b/gen3/bin/mutate-guppy-config-for-guppy-test.sh index de7da10d5..151bb7169 100644 --- 
a/gen3/bin/mutate-guppy-config-for-guppy-test.sh +++ b/gen3/bin/mutate-guppy-config-for-guppy-test.sh @@ -16,7 +16,7 @@ sed -i 's/\(.*\)"index": "\(.*\)_etl",$/\1"index": "jenkins_subject_alias",/' or # for bloodpac-like envs sed -i 's/\(.*\)"index": "\(.*\)_case",$/\1"index": "jenkins_subject_alias",/' original_guppy_config.yaml # the pre-defined Canine index works with subject ONLY (never case) -sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml +# sed -i 's/\(.*\)"type": "case"$/\1"type": "subject"/' original_guppy_config.yaml sed -i 's/\(.*\)"index": "\(.*\)_file",$/\1"index": "jenkins_file_alias",/' original_guppy_config.yaml sed -i 's/\(.*\)"config_index": "\(.*\)_array-config",$/\1"config_index": "jenkins_configs_alias",/' original_guppy_config.yaml From c96599951a48edb50814db84789771ad94a205d9 Mon Sep 17 00:00:00 2001 From: pieterlukasse Date: Thu, 2 Nov 2023 17:22:24 +0100 Subject: [PATCH 235/362] Fix: set ARBORIST_URL using the kube-setup-ohdsi.sh (#2400) * fix: set ARBORIST_URL using the kube-setup-ohdsi.sh * fix: change arborist url to internal one * fix: get arborist_url directly from manifest.json --------- Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- gen3/bin/kube-setup-ohdsi.sh | 2 ++ kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 7 ------- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index d586570db..14b35a714 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -87,6 +87,8 @@ setup_secrets() { export DB_HOST=$(jq -r ".db_host" <<< "$dbcreds") export FENCE_URL="https://${hostname}/user/user" + # get arborist_url from manifest.json: + export ARBORIST_URL=$(g3k_manifest_lookup .global.arborist_url) export FENCE_METADATA_URL="https://${hostname}/.well-known/openid-configuration" export FENCE_CLIENT_ID=$(jq -r ".FENCE_CLIENT_ID" <<< "$appcreds") export FENCE_CLIENT_SECRET=$(jq -r 
".FENCE_CLIENT_SECRET" <<< "$appcreds") diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 258aa8f87..65d6ed38c 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -59,13 +59,6 @@ spec: containers: - name: ohdsi-webapi GEN3_OHDSI-WEBAPI_IMAGE|-image: quay.io/cdis/ohdsi-webapi:latest-| - env: - - name: ARBORIST_URL - valueFrom: - configMapKeyRef: - name: manifest-global - key: arborist_url - optional: true livenessProbe: httpGet: path: /WebAPI/info/ From 997c596d86d22b4e085bc6da4e714050f06c7d16 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 2 Nov 2023 14:30:25 -0600 Subject: [PATCH 236/362] updating node monitor to include environment name (#2404) * updating node monitor to include environment name * moving the "!" --- kube/services/node-monitors/node-not-ready.yaml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kube/services/node-monitors/node-not-ready.yaml b/kube/services/node-monitors/node-not-ready.yaml index 6626b5507..500832fc3 100644 --- a/kube/services/node-monitors/node-not-ready.yaml +++ b/kube/services/node-monitors/node-not-ready.yaml @@ -22,6 +22,11 @@ spec: configMapKeyRef: name: global key: slack_webhook + - name: ENVIRONMENT + valueFrom: + configMapKeyRef: + name: global + key: environment command: ["/bin/bash"] args: @@ -36,7 +41,7 @@ spec: echo "Nodes reporting 'NodeStatusNeverUpdated', sending an alert:" echo "$NODES" # Send alert to Slack - curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODES}\` is stuck in "NotReady"!\"}" $SLACK_WEBHOOK_URL + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Node \`${NODES}\` is stuck in "NotReady" in \`${ENVIRONMENT}\`! 
\"}" $SLACK_WEBHOOK_URL else echo "No nodes reporting 'NodeStatusNeverUpdated'" fi From 816b9831aefac6cd64e49d957eb8444d82c12d8e Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 6 Nov 2023 13:34:11 -0600 Subject: [PATCH 237/362] Update web_whitelist (#2405) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 349d1e022..063eab2e6 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -76,6 +76,7 @@ go.googlesource.com golang.org gopkg.in grafana.com +grafana.github.io http.us.debian.org ifconfig.io ingress.coralogix.us From c6aeccd8f96382d7d32389e762a3bc31e770d821 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 7 Nov 2023 09:43:49 -0600 Subject: [PATCH 238/362] feat: add app-config.js for ohif-viewer (#2406) --- gen3/bin/kube-setup-dicom.sh | 13 ++ kube/services/ohif-viewer/app-config.js | 209 ++++++++++++++++++++++++ 2 files changed, 222 insertions(+) create mode 100644 kube/services/ohif-viewer/app-config.js diff --git a/gen3/bin/kube-setup-dicom.sh b/gen3/bin/kube-setup-dicom.sh index 85114f33f..42110eea2 100644 --- a/gen3/bin/kube-setup-dicom.sh +++ b/gen3/bin/kube-setup-dicom.sh @@ -82,6 +82,19 @@ EOM } EOM fi + + if g3k_manifest_lookup .versions["dicom-server"] > /dev/null 2>&1; then + export DICOM_SERVER_URL="/dicom-server" + gen3_log_info "attaching ohif viewer to old dicom-server (orthanc w/ aurora)" + fi + + if g3k_manifest_lookup .versions["orthanc"] > /dev/null 2>&1; then + export DICOM_SERVER_URL="/orthanc" + gen3_log_info "attaching ohif viewer to new dicom-server (orthanc w/ s3)" + fi + + envsubst <"${GEN3_HOME}/kube/services/ohif-viewer/app-config.js" > "$secretsFolder/app-config.js" + gen3 secrets sync 'setup orthanc-s3-g3auto secrets' } diff --git a/kube/services/ohif-viewer/app-config.js b/kube/services/ohif-viewer/app-config.js new 
file mode 100644 index 000000000..6768726f4 --- /dev/null +++ b/kube/services/ohif-viewer/app-config.js @@ -0,0 +1,209 @@ +window.config = { + routerBasename: '/ohif-viewer/', + // whiteLabeling: {}, + extensions: [], + modes: [], + customizationService: { + // Shows a custom route -access via http://localhost:3000/custom + // helloPage: '@ohif/extension-default.customizationModule.helloPage', + }, + showStudyList: true, + // some windows systems have issues with more than 3 web workers + maxNumberOfWebWorkers: 3, + // below flag is for performance reasons, but it might not work for all servers + omitQuotationForMultipartRequest: true, + showWarningMessageForCrossOrigin: true, + showCPUFallbackMessage: true, + showLoadingIndicator: true, + strictZSpacingForVolumeViewport: true, + maxNumRequests: { + interaction: 100, + thumbnail: 75, + // Prefetch number is dependent on the http protocol. For http 2 or + // above, the number of requests can be go a lot higher. + prefetch: 25, + }, + // filterQueryParam: false, + defaultDataSourceName: 'dicomweb', + /* Dynamic config allows user to pass "configUrl" query string this allows to load config without recompiling application. The regex will ensure valid configuration source */ + // dangerouslyUseDynamicConfig: { + // enabled: true, + // // regex will ensure valid configuration source and default is /.*/ which matches any character. To use this, setup your own regex to choose a specific source of configuration only. + // // Example 1, to allow numbers and letters in an absolute or sub-path only. + // // regex: /(0-9A-Za-z.]+)(\/[0-9A-Za-z.]+)*/ + // // Example 2, to restricts to either hosptial.com or othersite.com. 
+ // // regex: /(https:\/\/hospital.com(\/[0-9A-Za-z.]+)*)|(https:\/\/othersite.com(\/[0-9A-Za-z.]+)*)/ + // regex: /.*/, + // }, + dataSources: [ + { + friendlyName: 'dcmjs DICOMWeb Server', + namespace: '@ohif/extension-default.dataSourcesModule.dicomweb', + sourceName: 'dicomweb', + configuration: { + name: 'dicomweb', + wadoUriRoot: '$DICOM_SERVER_URL/wado', + qidoRoot: '$DICOM_SERVER_URL/dicom-web', + wadoRoot: '$DICOM_SERVER_URL/dicom-web', + + qidoSupportsIncludeField: false, + supportsReject: false, + imageRendering: 'wadors', + thumbnailRendering: 'wadors', + enableStudyLazyLoad: true, + supportsFuzzyMatching: false, + supportsWildcard: true, + staticWado: true, + singlepart: 'bulkdata,video', + // whether the data source should use retrieveBulkData to grab metadata, + // and in case of relative path, what would it be relative to, options + // are in the series level or study level (some servers like series some study) + bulkDataURI: { + enabled: true, + relativeResolution: 'studies', + }, + }, + }, + { + friendlyName: 'dicomweb delegating proxy', + namespace: '@ohif/extension-default.dataSourcesModule.dicomwebproxy', + sourceName: 'dicomwebproxy', + configuration: { + name: 'dicomwebproxy', + }, + }, + { + friendlyName: 'dicom json', + namespace: '@ohif/extension-default.dataSourcesModule.dicomjson', + sourceName: 'dicomjson', + configuration: { + name: 'json', + }, + }, + { + friendlyName: 'dicom local', + namespace: '@ohif/extension-default.dataSourcesModule.dicomlocal', + sourceName: 'dicomlocal', + configuration: {}, + }, + ], + httpErrorHandler: error => { + // This is 429 when rejected from the public idc sandbox too often. + console.warn(error.status); + + // Could use services manager here to bring up a dialog/modal if needed. 
+ console.warn('test, navigate to https://ohif.org/'); + }, + // whiteLabeling: { + // /* Optional: Should return a React component to be rendered in the "Logo" section of the application's Top Navigation bar */ + // createLogoComponentFn: function (React) { + // return React.createElement( + // 'a', + // { + // target: '_self', + // rel: 'noopener noreferrer', + // className: 'text-purple-600 line-through', + // href: '/', + // }, + // React.createElement('img', + // { + // src: './assets/customLogo.svg', + // className: 'w-8 h-8', + // } + // )) + // }, + // }, + hotkeys: [ + { + commandName: 'incrementActiveViewport', + label: 'Next Viewport', + keys: ['right'], + }, + { + commandName: 'decrementActiveViewport', + label: 'Previous Viewport', + keys: ['left'], + }, + { commandName: 'rotateViewportCW', label: 'Rotate Right', keys: ['r'] }, + { commandName: 'rotateViewportCCW', label: 'Rotate Left', keys: ['l'] }, + { commandName: 'invertViewport', label: 'Invert', keys: ['i'] }, + { + commandName: 'flipViewportHorizontal', + label: 'Flip Horizontally', + keys: ['h'], + }, + { + commandName: 'flipViewportVertical', + label: 'Flip Vertically', + keys: ['v'], + }, + { commandName: 'scaleUpViewport', label: 'Zoom In', keys: ['+'] }, + { commandName: 'scaleDownViewport', label: 'Zoom Out', keys: ['-'] }, + { commandName: 'fitViewportToWindow', label: 'Zoom to Fit', keys: ['='] }, + { commandName: 'resetViewport', label: 'Reset', keys: ['space'] }, + { commandName: 'nextImage', label: 'Next Image', keys: ['down'] }, + { commandName: 'previousImage', label: 'Previous Image', keys: ['up'] }, + // { + // commandName: 'previousViewportDisplaySet', + // label: 'Previous Series', + // keys: ['pagedown'], + // }, + // { + // commandName: 'nextViewportDisplaySet', + // label: 'Next Series', + // keys: ['pageup'], + // }, + { + commandName: 'setToolActive', + commandOptions: { toolName: 'Zoom' }, + label: 'Zoom', + keys: ['z'], + }, + // ~ Window level presets + { + commandName: 
'windowLevelPreset1', + label: 'W/L Preset 1', + keys: ['1'], + }, + { + commandName: 'windowLevelPreset2', + label: 'W/L Preset 2', + keys: ['2'], + }, + { + commandName: 'windowLevelPreset3', + label: 'W/L Preset 3', + keys: ['3'], + }, + { + commandName: 'windowLevelPreset4', + label: 'W/L Preset 4', + keys: ['4'], + }, + { + commandName: 'windowLevelPreset5', + label: 'W/L Preset 5', + keys: ['5'], + }, + { + commandName: 'windowLevelPreset6', + label: 'W/L Preset 6', + keys: ['6'], + }, + { + commandName: 'windowLevelPreset7', + label: 'W/L Preset 7', + keys: ['7'], + }, + { + commandName: 'windowLevelPreset8', + label: 'W/L Preset 8', + keys: ['8'], + }, + { + commandName: 'windowLevelPreset9', + label: 'W/L Preset 9', + keys: ['9'], + }, + ], +}; From e1e2fdeeac9d9baafc3841f8aca248b0ad58ad35 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Tue, 7 Nov 2023 11:47:28 -0600 Subject: [PATCH 239/362] Update orthanc-service.conf (#2402) --- .../gen3.nginx.conf/orthanc-service.conf | 31 ++++++++++++------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf index 70d2918ea..ed736189c 100644 --- a/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/orthanc-service.conf @@ -7,17 +7,6 @@ location /orthanc/ { set $authz_method "create"; set $authz_service "orthanc"; - if ($request_uri ~ "^/orthanc/dicom-web/studies/") { - set $authz_method "read"; - set $authz_resource "/services/orthanc/studies"; - - if ($request_method = POST) { - return 403; - # set $authz_method "create"; - # set $authz_resource "/services/orthanc/studies"; - } - } - auth_request /gen3-authz; proxy_set_header Authorization "Basic cHVibGljOmhlbGxv"; @@ -30,3 +19,23 @@ location /orthanc/ { # no limit to payload size so we can upload large DICOM files client_max_body_size 0; } + 
+location /orthanc/dicom-web/studies/ { + set $authz_method "read"; + set $authz_resource "/services/orthanc/studies"; + set $authz_service "orthanc"; + + auth_request /gen3-authz; + if ($request_method = POST) { + return 403; + } + proxy_set_header Authorization "Basic cHVibGljOmhlbGxv"; + + set $proxy_service "orthanc"; + set $upstream http://orthanc-service.$namespace.svc.cluster.local; + rewrite ^/orthanc/(.*) /$1 break; + proxy_pass $upstream; + + # no limit to payload size so we can upload large DICOM files + client_max_body_size 0; +} From 7673146280cb3a53d7803ca064becfacf9cc3f86 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Wed, 8 Nov 2023 14:22:45 -0600 Subject: [PATCH 240/362] Update ohif-viewer-service.conf (#2410) --- .../revproxy/gen3.nginx.conf/ohif-viewer-service.conf | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf index 9a20bc832..22926bcf0 100644 --- a/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/ohif-viewer-service.conf @@ -3,11 +3,12 @@ location /ohif-viewer/ { # return 403 "failed csrf check"; # } - set $authz_resource "/services/ohif-viewer"; - set $authz_method "read"; - set $authz_service "ohif-viewer"; + # see if this can be fixed in the future for anonymous access + # set $authz_resource "/services/ohif-viewer"; + # set $authz_method "read"; + # set $authz_service "ohif-viewer"; - auth_request /gen3-authz; + # auth_request /gen3-authz; set $proxy_service "ohif-viewer"; set $upstream http://ohif-viewer-service.$namespace.svc.cluster.local; From fee95411e38ff3fb17ed08d164929f2f3c73037e Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Thu, 9 Nov 2023 09:43:12 -0600 Subject: [PATCH 241/362] Add log when gitops-sync initially detects changes (#2411) --- 
gen3/bin/gitops.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gen3/bin/gitops.sh b/gen3/bin/gitops.sh index fda6d4ffa..bc0358499 100644 --- a/gen3/bin/gitops.sh +++ b/gen3/bin/gitops.sh @@ -445,8 +445,13 @@ gen3_gitops_sync() { echo "DRYRUN flag detected, not rolling" gen3_log_info "dict_roll: $dict_roll; versions_roll: $versions_roll; portal_roll: $portal_roll; etl_roll: $etl_roll; fence_roll: $fence_roll" else - if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("fence_roll" = true) ]]; then + if [[ ( "$dict_roll" = true ) || ( "$versions_roll" = true ) || ( "$portal_roll" = true )|| ( "$etl_roll" = true ) || ( "$covid_cronjob_roll" = true ) || ("$fence_roll" = true) ]]; then echo "changes detected, rolling" + tmpHostname=$(gen3 api hostname) + if [[ $slack = true ]]; then + curl -X POST --data-urlencode "payload={\"text\": \"Gitops-sync Cron: Changes detected on ${tmpHostname} - rolling...\"}" "${slackWebHook}" + fi + # run etl job before roll all so guppy can pick up changes if [[ "$etl_roll" = true ]]; then gen3 update_config etl-mapping "$(gen3 gitops folder)/etlMapping.yaml" @@ -472,7 +477,6 @@ gen3_gitops_sync() { rollRes=$? 
# send result to slack if [[ $slack = true ]]; then - tmpHostname=$(gen3 api hostname) resStr="SUCCESS" color="#1FFF00" if [[ $rollRes != 0 ]]; then From e51eba5f3eb65a3bf144b39ac5e95934d5f08d5f Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Fri, 10 Nov 2023 15:25:32 -0600 Subject: [PATCH 242/362] Add rmq.n3c.ncats.io to Squid whitelist (#2413) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 063eab2e6..0cbc0a6bc 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -145,6 +145,7 @@ repos.sensuapp.org repo.vmware.com repository.cloudera.com resource.metadatacenter.org +rmq.n3c.ncats.io rules.emergingthreats.net rweb.quant.ku.edu sa-update.dnswl.org From f7d0469b087a4020aa5cf3c9aab52de00e54a4bc Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Wed, 15 Nov 2023 07:27:12 -0800 Subject: [PATCH 243/362] Chore/create cedar client (#2407) * (chore HP-1273): add CEDAR client creation * (fix PPS-655): change comment * (fix PPS-655): remove last comma in json secret * (chore HP-1273): if creation fails then don't start wrapper service * (chore HP-1273): don't echo secrets on client creation failure --- gen3/bin/kube-setup-cedar-wrapper.sh | 55 ++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh index 9a899a770..c8f0d03c6 100644 --- a/gen3/bin/kube-setup-cedar-wrapper.sh +++ b/gen3/bin/kube-setup-cedar-wrapper.sh @@ -1,6 +1,58 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/lib/kube-setup-init" +create_client_and_secret() { + local hostname=$(gen3 api hostname) + local client_name="cedar_ingest_client" + gen3_log_info "kube-setup-cedar-wrapper" "creating fence ${client_name} for $hostname" + # delete any 
existing fence cedar clients + g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client ${client_name} > /dev/null 2>&1 + local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client ${client_name} --grant-types client_credentials | tail -1) + # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') + if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then + gen3_log_err "kube-setup-cedar-wrapper" "Failed generating ${client_name}" + return 1 + else + local client_id="${BASH_REMATCH[2]}" + local client_secret="${BASH_REMATCH[3]}" + gen3_log_info "Create cedar-client secrets file" + cat - < /dev/null 2>&1; then + local have_cedar_client_secret="1" + else + gen3_log_info "No g3auto cedar-client key present in secret" + fi + + local client_name="cedar_ingest_client" + local client_list=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-list) + local client_count=$(echo "$client_list=" | grep -cE "'name':.*'${client_name}'") + gen3_log_info "CEDAR client count = ${client_count}" + + if [[ -z $have_cedar_client_secret ]] || [[ ${client_count} -lt 1 ]]; then + gen3_log_info "Creating new cedar-ingest client and secret" + local credsPath="$(gen3_secrets_folder)/g3auto/cedar/${cedar_creds_file}" + if ! create_client_and_secret > $credsPath; then + gen3_log_err "Failed to setup cedar-ingest secret" + return 1 + else + gen3 secrets sync + gen3 job run usersync + fi + fi +} + [[ -z "$GEN3_ROLL_ALL" ]] && gen3 kube-setup-secrets if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then @@ -8,6 +60,9 @@ if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then return 1 fi +gen3_log_info "Checking cedar-client creds" +setup_creds + if ! 
gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper" return 1 From 75c7ec815a848e21136cb9daf533a84186ddb4f2 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 16 Nov 2023 09:25:48 -0800 Subject: [PATCH 244/362] Quarantine jenkins-dcp until migration to ES7 (#2417) --- files/scripts/ci-env-pool-reset.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index 362cfbfd5..c0c1f67c6 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -29,7 +29,6 @@ source "${GEN3_HOME}/gen3/gen3setup.sh" cat - > jenkins-envs-services.txt < Date: Fri, 17 Nov 2023 16:19:28 -0500 Subject: [PATCH 245/362] Chore/add jenkins2 ci worker service (#2418) * adding jenkins2-ci-worker service * renamed the file --- .../jenkins2-agent-service.yaml | 17 ++ .../jenkins2-ci-worker-deploy.yaml | 149 ++++++++++++++++++ .../jenkins2-ci-worker-pvc.yaml | 12 ++ 3 files changed, 178 insertions(+) create mode 100644 kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml create mode 100644 kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml create mode 100644 kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml diff --git a/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml new file mode 100644 index 000000000..7f4e58109 --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-agent-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + name: jenkins-agent-service + name: jenkins-agent + namespace: default +spec: + ports: + - name: slavelistener + port: 50000 + protocol: TCP + targetPort: 50000 + selector: + app: jenkins + sessionAffinity: None + type: ClusterIP diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml 
b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml new file mode 100644 index 000000000..3dea38a5c --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml @@ -0,0 +1,149 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: jenkins-ci-worker-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: jenkins-ci-worker + template: + metadata: + labels: + app: jenkins-ci-worker + # for network policy + netnolimit: "yes" + annotations: + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + - matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + serviceAccountName: jenkins-service + securityContext: + runAsUser: 1000 + fsGroup: 1000 + initContainers: + - args: + - -c + - | + # fix permissions for /var/run/docker.sock + chmod 666 /var/run/docker.sock + echo "done" + command: + - /bin/bash + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + name: awshelper + resources: {} + securityContext: + allowPrivilegeEscalation: false + runAsUser: 0 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/run/docker.sock + name: dockersock + containers: + # + # See for details on running docker in a pod: + # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b + # + - name: jenkins-worker + image: "quay.io/cdis/gen3-ci-worker:master" + ports: + - containerPort: 8080 + env: + - name: JENKINS_URL + value: "https://jenkins2.planx-pla.net" + - name: JENKINS_SECRET + valueFrom: + secretKeyRef: + name: jenkins-ci-worker-g3auto + key: jenkins-jnlp-agent-secret + - name: JENKINS_AGENT_NAME + value: "gen3-ci-worker" + - name: JENKINS_TUNNEL + value: 
"jenkins-agent:50000" + - name: AWS_DEFAULT_REGION + value: us-east-1 + - name: JAVA_OPTS + value: "-Xmx3072m" + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: jenkins-secret + key: aws_secret_access_key + - name: GOOGLE_EMAIL_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: email + - name: GOOGLE_PASSWORD_AUX1 + valueFrom: + secretKeyRef: + name: google-acct1 + key: password + - name: GOOGLE_EMAIL_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: email + - name: GOOGLE_PASSWORD_AUX2 + valueFrom: + secretKeyRef: + name: google-acct2 + key: password + - name: GOOGLE_APP_CREDS_JSON + valueFrom: + secretKeyRef: + name: jenkins-g3auto + key: google_app_creds.json + resources: + limits: + cpu: 0.9 + memory: 4096Mi + ephemeral-storage: 500Mi + imagePullPolicy: Always + volumeMounts: + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.crt" + subPath: "service.crt" + - name: "cert-volume" + readOnly: true + mountPath: "/mnt/ssl/service.key" + subPath: "service.key" + - name: "ca-volume" + readOnly: true + mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" + subPath: "ca.pem" + - name: dockersock + mountPath: "/var/run/docker.sock" + imagePullPolicy: Always + volumes: + - name: cert-volume + secret: + secretName: "cert-jenkins-service" + - name: ca-volume + secret: + secretName: "service-ca" + - name: dockersock + hostPath: + path: /var/run/docker.sock diff --git a/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml new file mode 100644 index 000000000..047e4e966 --- /dev/null +++ b/kube/services/jenkins2-ci-worker/jenkins2-ci-worker-pvc.yaml @@ -0,0 +1,12 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: datadir-jenkins-ci + annotations: + volume.beta.kubernetes.io/storage-class: gp2 +spec: + accessModes: + 
- ReadWriteOnce + resources: + requests: + storage: 200Gi From 421b762dd8452303e591416ca5ef749206189948 Mon Sep 17 00:00:00 2001 From: Atharva Rane <41084525+atharvar28@users.noreply.github.com> Date: Mon, 20 Nov 2023 15:09:28 -0500 Subject: [PATCH 246/362] add kube-setup-jenkins2 script (#2419) * add kube-setup-jenkins2 script * change the env image name --- gen3/bin/kube-setup-jenkins2.sh | 71 +++++++++++++++++++++ kube/services/jenkins2/jenkins2-deploy.yaml | 2 +- 2 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 gen3/bin/kube-setup-jenkins2.sh diff --git a/gen3/bin/kube-setup-jenkins2.sh b/gen3/bin/kube-setup-jenkins2.sh new file mode 100644 index 000000000..f5233f978 --- /dev/null +++ b/gen3/bin/kube-setup-jenkins2.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Just a little helper for deploying jenkins onto k8s the first time +# + +set -e + +export WORKSPACE="${WORKSPACE:-$HOME}" +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +gen3 kube-setup-secrets + +# +# Assume Jenkins should use 'jenkins' profile credentials in "${WORKSPACE}"/.aws/credentials +# +aws_access_key_id="$(aws configure get jenkins.aws_access_key_id)" +aws_secret_access_key="$(aws configure get jenkins.aws_secret_access_key)" +google_acct1_email="$(jq -r '.jenkins.google_acct1.email' < $(gen3_secrets_folder)/creds.json)" +google_acct1_password="$(jq -r '.jenkins.google_acct1.password' < $(gen3_secrets_folder)/creds.json)" +google_acct2_email="$(jq -r '.jenkins.google_acct2.email' < $(gen3_secrets_folder)/creds.json)" +google_acct2_password="$(jq -r '.jenkins.google_acct2.password' < $(gen3_secrets_folder)/creds.json)" + +if [ -z "$aws_access_key_id" -o -z "$aws_secret_access_key" ]; then + gen3_log_err 'not configuring jenkins - could not extract secrets from aws configure' + exit 1 +fi +if [[ -z "$google_acct1_email" || -z "$google_acct1_password" || -z "$google_acct2_email" || -z "$google_acct2_password" ]]; then + gen3_log_err "missing google credentials in 
'.jenkins' of creds.json" + exit 1 +fi + +if ! g3kubectl get secrets jenkins-secret > /dev/null 2>&1; then + # make it easy to rerun kube-setup-jenkins.sh + g3kubectl create secret generic jenkins-secret "--from-literal=aws_access_key_id=$aws_access_key_id" "--from-literal=aws_secret_access_key=$aws_secret_access_key" +fi +if ! g3kubectl get secrets google-acct1 > /dev/null 2>&1; then + g3kubectl create secret generic google-acct1 "--from-literal=email=${google_acct1_email}" "--from-literal=password=${google_acct1_password}" +fi +if ! g3kubectl get secrets google-acct2 > /dev/null 2>&1; then + g3kubectl create secret generic google-acct2 "--from-literal=email=${google_acct2_email}" "--from-literal=password=${google_acct2_password}" +fi + +if ! g3kubectl get storageclass gp2 > /dev/null 2>&1; then + g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/10storageclass.yaml" +fi +if ! g3kubectl get persistentvolumeclaim datadir-jenkins > /dev/null 2>&1; then + g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/00pvc.yaml" +fi + +# Note: jenkins service account is configured by `kube-setup-roles` +gen3 kube-setup-roles +# Note: only the 'default' namespace jenkins-service account gets a cluster rolebinding +g3kubectl apply -f "${GEN3_HOME}/kube/services/jenkins/clusterrolebinding-devops.yaml" + +# Note: requires Jenkins entry in cdis-manifest +gen3 roll jenkins2 +gen3 roll jenkins2-worker +gen3 roll jenkins2-ci-worker + +# +# Get the ARN of the SSL certificate for the commons - +# We'll optimistically assume it's a wildcard cert that +# is appropriate to also attach to the jenkins ELB +# +export ARN=$(g3kubectl get configmap global --output=jsonpath='{.data.revproxy_arn}') +if [[ ! 
-z $ARN ]]; then + envsubst <"${GEN3_HOME}/kube/services/jenkins/jenkins-service.yaml" | g3kubectl apply -f - +else + gen3_log_info "Global configmap not configured - not launching service (require SSL cert ARN)" +fi diff --git a/kube/services/jenkins2/jenkins2-deploy.yaml b/kube/services/jenkins2/jenkins2-deploy.yaml index ee838bae6..08365f811 100644 --- a/kube/services/jenkins2/jenkins2-deploy.yaml +++ b/kube/services/jenkins2/jenkins2-deploy.yaml @@ -48,7 +48,7 @@ spec: # https://estl.tech/accessing-docker-from-a-kubernetes-pod-68996709c04b # - name: jenkins - GEN3_JENKINS_IMAGE + GEN3_JENKINS2_IMAGE ports: - containerPort: 8080 name: http From c9803f5bc310e84c87582c1f7c47783e256e784b Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 28 Nov 2023 13:07:33 -0700 Subject: [PATCH 247/362] changing expected results to match sheepdog and fence-deploy.yaml changes (#2421) --- gen3/lib/testData/default/expectedFenceResult.yaml | 8 ++++++++ gen3/lib/testData/default/expectedSheepdogResult.yaml | 10 +++++++++- .../test1.manifest.g3k/expectedFenceResult.yaml | 7 +++++++ .../test1.manifest.g3k/expectedSheepdogResult.yaml | 7 +++++++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/gen3/lib/testData/default/expectedFenceResult.yaml b/gen3/lib/testData/default/expectedFenceResult.yaml index f6d76d790..98c360531 100644 --- a/gen3/lib/testData/default/expectedFenceResult.yaml +++ b/gen3/lib/testData/default/expectedFenceResult.yaml @@ -44,6 +44,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType @@ -136,6 +143,7 @@ spec: ports: - containerPort: 80 - containerPort: 443 + - containerPort: 6567 volumeMounts: # ----------------------------------------------------------------------------- 
# DEPRECATED! Remove when all commons are no longer using local_settings.py diff --git a/gen3/lib/testData/default/expectedSheepdogResult.yaml b/gen3/lib/testData/default/expectedSheepdogResult.yaml index b9db85a36..a2bd3efcc 100644 --- a/gen3/lib/testData/default/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/default/expectedSheepdogResult.yaml @@ -17,6 +17,7 @@ spec: template: metadata: labels: + netnolimit: "yes" app: sheepdog release: production public: "yes" @@ -39,12 +40,19 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType operator: In values: - - ONDEMAND + - SPOT automountServiceAccountToken: false volumes: - name: config-volume diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml index d4196c070..adc35ad2f 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml @@ -47,6 +47,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType diff --git a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml index f54fd3e03..08407ae52 100644 --- a/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml +++ b/gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml @@ -43,6 +43,13 @@ spec: nodeAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - 
spot + - weight: 99 preference: matchExpressions: - key: eks.amazonaws.com/capacityType From 41c61c2648db6ec1f373c5c93c09f4ac3d615005 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 1 Dec 2023 09:30:15 -0800 Subject: [PATCH 248/362] Update jenkins versions (#2422) * Update Jenkins * Update Dockerfile * Update Dockerfile --- Docker/jenkins/Jenkins-CI-Worker/Dockerfile | 2 +- Docker/jenkins/Jenkins-Worker/Dockerfile | 2 +- Docker/jenkins/Jenkins/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile index 242d5e74d..6eeb8f4fd 100644 --- a/Docker/jenkins/Jenkins-CI-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-CI-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/inbound-agent:jdk11 +FROM jenkins/inbound-agent:jdk21 USER root diff --git a/Docker/jenkins/Jenkins-Worker/Dockerfile b/Docker/jenkins/Jenkins-Worker/Dockerfile index c824690de..fec6b3203 100644 --- a/Docker/jenkins/Jenkins-Worker/Dockerfile +++ b/Docker/jenkins/Jenkins-Worker/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/inbound-agent:jdk11 +FROM jenkins/inbound-agent:jdk21 USER root diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index ae39ac574..8b8c58406 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.415-jdk11 +FROM jenkins/jenkins:2.434-jdk21 USER root From 93cd549a851a95697b00689bac240ec7463b1cb9 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Tue, 5 Dec 2023 17:17:33 -0500 Subject: [PATCH 249/362] Added a cronjob to reconcile karpenter resources, in case they don't get created by argo-events (#2426) * Setting up the Karpenter reconciler cronjob * Matching both configmaps * Putting configmap in right namespace * Fixing oopsie * Fixing namespaces * Duh * Removing dependency on Gen3, since that was a pain * Fixing name and adding auth 
* We need the label for network policies * This should work * Update application.yaml * Moving everything over to argo-events namespace * Removed configmap.yaml, and moved everything over to argo-events namespace --- .../argo-events/workflows/configmap.yaml | 25 ++++++- .../karpenter-reconciler/application.yaml | 22 ++++++ kube/services/karpenter-reconciler/auth.yaml | 44 ++++++++++++ .../karpenter-reconciler-cronjob.yaml | 71 +++++++++++++++++++ 4 files changed, 159 insertions(+), 3 deletions(-) create mode 100644 kube/services/karpenter-reconciler/application.yaml create mode 100644 kube/services/karpenter-reconciler/auth.yaml create mode 100644 kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index eb5f1b04f..9fc4b7826 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -21,9 +21,28 @@ data: - key: karpenter.k8s.aws/instance-family operator: In values: - - c6i - - c7i - - m7i + - c6a.large + - c6a.xlarge + - c6a.2xlarge + - c6a.4xlarge + - c6a.8xlarge + - c6a.12xlarge + - c6i.large + - c6i.xlarge + - c6i.2xlarge + - c6i.4xlarge + - c6i.8xlarge + - c6i.12xlarge + - m6a.2xlarge + - m6a.4xlarge + - m6a.8xlarge + - m6a.12xlarge + - m6a.16xlarge + - m6i.2xlarge + - m6i.4xlarge + - m6i.8xlarge + - m6i.12xlarge + - m6i.16xlarge taints: - key: role value: $WORKFLOW_NAME diff --git a/kube/services/karpenter-reconciler/application.yaml b/kube/services/karpenter-reconciler/application.yaml new file mode 100644 index 000000000..c2d945b47 --- /dev/null +++ b/kube/services/karpenter-reconciler/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: karpenter-reconciler-application + namespace: argocd +spec: + destination: + namespace: kube-system + server: https://kubernetes.argo-events.svc + project: argo-events + 
source: + repoURL: https://github.com/uc-cdis/cloud-automation.git + targetRevision: master + path: kube/services/karpenter-reconciler + directory: + exclude: "application.yaml" + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/kube/services/karpenter-reconciler/auth.yaml b/kube/services/karpenter-reconciler/auth.yaml new file mode 100644 index 000000000..c159028ab --- /dev/null +++ b/kube/services/karpenter-reconciler/auth.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: karpenter-reconciler + namespace: argo-events +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: karpenter-admin-binding-reconciler +subjects: + - kind: ServiceAccount + name: karpenter-reconciler + namespace: argo-events +roleRef: + kind: ClusterRole + name: karpenter-admin + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: workflow-viewer-reconciler +subjects: + - kind: ServiceAccount + name: karpenter-reconciler + namespace: argo-events +roleRef: + kind: ClusterRole + name: argo-argo-workflows-view + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: viewer-reconciler +subjects: + - kind: ServiceAccount + name: karpenter-reconciler + namespace: argo-events +roleRef: + kind: ClusterRole + name: system:aggregate-to-view + apiGroup: rbac.authorization.k8s.io diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml new file mode 100644 index 000000000..c5b501918 --- /dev/null +++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml @@ -0,0 +1,71 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: karpenter-reconciler-cronjob + namespace: argo-events +spec: + schedule: "*/5 * * * *" + jobTemplate: 
+ spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccount: karpenter-reconciler + volumes: + - name: karpenter-templates-volume + configMap: + name: karpenter-templates + containers: + - name: karpenter-reconciler + image: quay.io/cdis/awshelper + volumeMounts: + - name: karpenter-templates-volume + mountPath: /manifests + env: + - name: PROVISIONER_TEMPLATE + value: /manifests/provisioner.yaml + - name: AWSNODETEMPLATE_TEMPLATE + value: /manifests/nodetemplate.yaml + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + if [ -z "$PROVISIONER_TEMPLATE" ]; then + PROVISIONER_TEMPLATE="provisioner.yaml" + fi + + if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then + AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" + fi + + ENVIRONMENT=$(kubectl get configmap global -o jsonpath="{.data.environment}") + + RAW_WORKFLOWS=$(kubectl get workflows -n argo -o yaml) + + WORKFLOWS=$(echo "${RAW_WORKFLOWS}" | yq -r '.items[] | [.metadata.name, .metadata.labels.gen3username] | join(" ")') + + WORKFLOW_ARRAY=() + + while IFS= read -r line; do + WORKFLOW_ARRAY+=("$line") + done <<< "$WORKFLOWS" + + for workflow in "${WORKFLOW_ARRAY[@]}" + do + workflow_name=$(echo "$workflow" | awk '{print $1}') + workflow_user=$(echo "$workflow" | awk '{print $2}') + + if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then + echo "No awsnodetemplate found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USER_NAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - + fi + + if ! 
kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then + echo "No provisioner found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USER_NAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + fi + done + restartPolicy: OnFailure From 49051c01b73ebb30aa947c8d868dfe73916697af Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 6 Dec 2023 09:48:01 -0500 Subject: [PATCH 250/362] Update application.yaml (#2427) --- kube/services/karpenter-reconciler/application.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/karpenter-reconciler/application.yaml b/kube/services/karpenter-reconciler/application.yaml index c2d945b47..fb0fab871 100644 --- a/kube/services/karpenter-reconciler/application.yaml +++ b/kube/services/karpenter-reconciler/application.yaml @@ -6,8 +6,8 @@ metadata: spec: destination: namespace: kube-system - server: https://kubernetes.argo-events.svc - project: argo-events + server: https://kubernetes.default.svc + project: default source: repoURL: https://github.com/uc-cdis/cloud-automation.git targetRevision: master From 59d3e11d2aabe0f10f5daff8eade21c8fb839523 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 6 Dec 2023 16:15:53 -0500 Subject: [PATCH 251/362] Feat/karpenter reconciler (#2428) * Setting up the Karpenter reconciler cronjob * Matching both configmaps * Putting configmap in right namespace * Fixing oopsie * Fixing namespaces * Duh * Removing dependency on Gen3, since that was a pain * Fixing name and adding auth * We need the label for network policies * This should work * Update application.yaml * Moving everything over to argo-events namespace * Removed configmap.yaml, and moved everything over to argo-events namespace * Fixing stuff * Fixing it so they can work together * Fixing a typo in the configmap * 
Username, not "user name" --- .../argo-events/workflows/configmap.yaml | 26 +++++++++---------- .../argo-events/workflows/sensor-created.yaml | 4 +-- .../karpenter-reconciler-cronjob.yaml | 7 ++--- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index 9fc4b7826..ae1c16653 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -8,7 +8,7 @@ data: apiVersion: karpenter.sh/v1alpha5 kind: Provisioner metadata: - name: workflow-$WORKFLOW_NAME + name: workflow-WORKFLOW_NAME spec: requirements: - key: karpenter.sh/capacity-type @@ -18,7 +18,7 @@ data: operator: In values: - amd64 - - key: karpenter.k8s.aws/instance-family + - key: node.kubernetes.io/instance-type operator: In values: - c6a.large @@ -45,15 +45,15 @@ data: - m6i.16xlarge taints: - key: role - value: $WORKFLOW_NAME + value: WORKFLOW_NAME effect: NoSchedule labels: - role: $WORKFLOW_NAME + role: WORKFLOW_NAME limits: resources: cpu: 2000 providerRef: - name: workflow-$WORKFLOW_NAME + name: workflow-WORKFLOW_NAME # Kill nodes after 30 days to ensure they stay up to date ttlSecondsUntilExpired: 2592000 ttlSecondsAfterEmpty: 10 @@ -62,18 +62,18 @@ data: apiVersion: karpenter.k8s.aws/v1alpha1 kind: AWSNodeTemplate metadata: - name: workflow-$WORKFLOW_NAME + name: workflow-WORKFLOW_NAME spec: subnetSelector: - karpenter.sh/discovery: $ENVIRONMENT + karpenter.sh/discovery: ENVIRONMENT securityGroupSelector: - karpenter.sh/discovery: $ENVIRONMENT-workflow + karpenter.sh/discovery: ENVIRONMENT-workflow tags: - Environment: $ENVIRONMENT - Name: eks-$ENVIRONMENT-workflow-karpenter - karpenter.sh/discovery: $ENVIRONMENT - workflowname: $WORKFLOW_NAME - gen3username: $GEN3_USERNAME + Environment: ENVIRONMENT + Name: eks-ENVIRONMENT-workflow-karpenter + karpenter.sh/discovery: ENVIRONMENT + workflowname: WORKFLOW_NAME + gen3username: 
GEN3_USERNAME gen3service: argo-workflows purpose: workflow metadataOptions: diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index 7b1b9d62f..4221f5742 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -60,11 +60,11 @@ spec: - "-c" - | if ! kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - envsubst < /home/manifests/nodetemplate.yaml | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - fi if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - envsubst < /home/manifests/provisioner.yaml | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - fi env: - name: WORKFLOW_NAME diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml index c5b501918..4f82e9d43 100644 --- a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml +++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml @@ -41,7 +41,7 @@ spec: AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" fi - ENVIRONMENT=$(kubectl get configmap global -o jsonpath="{.data.environment}") + ENVIRONMENT=$(kubectl -n default get configmap global -o jsonpath="{.data.environment}") RAW_WORKFLOWS=$(kubectl get workflows -n argo -o yaml) @@ -60,12 +60,13 @@ spec: if ! 
kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then echo "No awsnodetemplate found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USER_NAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - fi if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then echo "No provisioner found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USER_NAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + fi done restartPolicy: OnFailure From 6a10e7aa38a309733bac1989e894f452726f867d Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Fri, 8 Dec 2023 21:41:10 -0600 Subject: [PATCH 252/362] skip gender tags (#2376) * skip gender tags * dummy --- files/scripts/healdata/heal-cedar-data-ingest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index e95ab8604..4a7d88c3c 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -85,6 +85,8 @@ def update_filter_metadata(metadata_to_update): ] # Add any new tags from advSearchFilters for f in metadata_to_update["advSearchFilters"]: + if f["key"] == "Gender": + continue tag = {"name": f["value"], "category": f["key"]} if tag not in tags: tags.append(tag) From eaf04bf5b472b94558947f4842980576ce2ea1a5 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 11 Dec 2023 09:23:36 -0600 Subject: [PATCH 253/362] GPE-1081 
(#2371) Co-authored-by: Edward Malinowski --- doc/s3-to-google-replication.md | 68 +++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 doc/s3-to-google-replication.md diff --git a/doc/s3-to-google-replication.md b/doc/s3-to-google-replication.md new file mode 100644 index 000000000..82d0374c7 --- /dev/null +++ b/doc/s3-to-google-replication.md @@ -0,0 +1,68 @@ +# S3 to Google Cloud Storage Replication Pipeline + +This document will guide you through setting up a replication pipeline from AWS S3 to Google Cloud Storage (GCS) using VPC Service Controls and Storage Transfer Service. This solution is compliant with security best practices, ensuring that data transfer between AWS S3 and GCS is secure and efficient. + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [Step-by-step Guide](#step-by-step-guide) + - [Setup VPC Service Controls](#setup-vpc-service-controls) + - [Initiate Storage Transfer Service](#initiate-storage-transfer-service) +- [Compliance Benefits](#compliance-benefits) +- [Cost Benefit Analysis](#cost-benefit-analysis) + +## Prerequisites + +1. **AWS account** with access to the S3 bucket. +2. **Google Cloud account** with permissions to create buckets in GCS and set up VPC Service Controls and Storage Transfer Service. +3. Familiarity with AWS IAM for S3 bucket access and Google Cloud IAM for GCS access. + +## Step-by-step Guide + +### Setup VPC Service Controls + +1. **Access the VPC Service Controls** in the Google Cloud Console. +2. **Create a new VPC Service Control perimeter**. + - Name the perimeter and choose the desired region. + - Add the necessary GCP services. Ensure to include `storagetransfer.googleapis.com` for Storage Transfer Service. +3. **Setup VPC Service Control Policy** to allow connections from AWS. + - Use the [documentation](https://cloud.google.com/vpc-service-controls/docs/set-up) to help set up. + +### Initiate Storage Transfer Service + +1. 
Navigate to **Storage Transfer Service** in the Google Cloud Console. +2. Click **Create Transfer Job**. +3. **Select Source**: Choose Amazon S3 bucket and provide the necessary details. + - Ensure to have necessary permissions for the S3 bucket in AWS IAM. +4. **Select Destination**: Choose your GCS bucket. +5. **Schedule & Advanced Settings**: Set the frequency and conditions for the transfer. Consider setting up notifications for job completion or errors. +6. **Review & Create**: Confirm the details and initiate the transfer job. + +## Compliance Benefits + +Setting up a secure replication pipeline from AWS S3 to GCS using VPC Service Controls and Storage Transfer Service offers the following compliance benefits: + +1. **Data Security**: The VPC Service Controls provide an additional layer of security by ensuring that the transferred data remains within a defined security perimeter, reducing potential data leak risks. +2. **Auditability**: Both AWS and GCS offer logging and monitoring tools that can provide audit trails for data transfer. This can help in meeting regulatory compliance requirements. +3. **Consistent Data Replication**: The Storage Transfer Service ensures that data in GCS is up to date with the source S3 bucket, which is essential for consistent backup and disaster recovery strategies. + +## Cost Benefit Analysis + +**Benefits**: + +1. **Data Redundancy**: Having data stored in multiple cloud providers can be a part of a robust disaster recovery strategy. +2. **Flexibility**: Replicating data to GCS provides flexibility in multi-cloud strategies, enabling seamless migrations or usage of GCP tools and services. +3. **Security**: Utilizing VPC Service Controls strengthens the security posture. + +**Costs**: + +1. **Data Transfer Costs**: Both AWS and Google Cloud might charge for data transfer. It's crucial to analyze the cost, especially for large data transfers. +2. 
**Storage Costs**: Storing data redundantly incurs additional storage costs in GCS. + +**Analysis**: + +To stay in compliance, we require multiple copies of our data in separate datacenters or clouds. After our security audit, we found the important of not keeping data in a single cloud. It may be expensive to transfer data from AWS to GCP and to store it in 2 clouds simultaniously, but if we need to, then this solution could be an easy way to achieve compliance. + +--- + +Please note that while this guide is based on the provided Google Cloud documentation, it's crucial to refer to the original [documentation](https://cloud.google.com/architecture/transferring-data-from-amazon-s3-to-cloud-storage-using-vpc-service-controls-and-storage-transfer-service) for the most accurate and up-to-date information. From df000c3fc49bbd916583002ce9cfdef9b1d27d45 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Mon, 11 Dec 2023 10:27:40 -0600 Subject: [PATCH 254/362] add csrf guppy (#2425) --- kube/services/revproxy/gen3.nginx.conf/guppy-service.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf index db2de5886..0e6b4f7e4 100644 --- a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf @@ -1,4 +1,7 @@ location /guppy/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } proxy_connect_timeout 600s; proxy_send_timeout 600s; proxy_read_timeout 600s; From a6340ec429c38cf327ceafdd74a6ffcc9b1d93e2 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 13 Dec 2023 08:43:40 -0700 Subject: [PATCH 255/362] reverting guppy nginx config change (#2429) --- kube/services/revproxy/gen3.nginx.conf/guppy-service.conf | 3 --- 1 file changed, 3 deletions(-) diff --git 
a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf index 0e6b4f7e4..db2de5886 100644 --- a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf @@ -1,7 +1,4 @@ location /guppy/ { - if ($csrf_check !~ ^ok-\S.+$) { - return 403 "failed csrf check"; - } proxy_connect_timeout 600s; proxy_send_timeout 600s; proxy_read_timeout 600s; From f06e5ac03293088c3d3452e2bc0e4d7c8b7b0a14 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 18 Dec 2023 11:42:46 -0700 Subject: [PATCH 256/362] adding node-to-node encryption (#2431) --- gen3/bin/create-es7-cluster.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gen3/bin/create-es7-cluster.sh b/gen3/bin/create-es7-cluster.sh index d18c4203f..553dc2652 100644 --- a/gen3/bin/create-es7-cluster.sh +++ b/gen3/bin/create-es7-cluster.sh @@ -40,6 +40,7 @@ else --vpc-options "SubnetIds=${subnet_ids[*]},SecurityGroupIds=${security_groups[*]}" \ --access-policies "$access_policies" \ --encryption-at-rest-options "Enabled=true,KmsKeyId=$kms_key_id"\ + --node-to-node-encryption-options "Enabled=true" > /dev/null 2>&1 # Wait for the new cluster to be available @@ -60,4 +61,4 @@ else if [ $retry_count -eq $max_retries ]; then echo "New cluster creation may still be in progress. Please check the AWS Management Console for the status." 
fi -fi \ No newline at end of file +fi From 21cdceb78b978bc7005412508665e35ee6d5ec42 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Wed, 20 Dec 2023 13:31:43 -0600 Subject: [PATCH 257/362] Update ohdsi-webapi-deploy.yaml (#2432) --- kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml index 65d6ed38c..a729ae7c4 100644 --- a/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml +++ b/kube/services/ohdsi-webapi/ohdsi-webapi-deploy.yaml @@ -83,7 +83,7 @@ spec: limits: memory: 4Gi - name: ohdsi-webapi-reverse-proxy - image: nginx:1.23 + image: 707767160287.dkr.ecr.us-east-1.amazonaws.com/gen3/nginx:1.23 ports: - containerPort: 80 volumeMounts: @@ -97,4 +97,4 @@ spec: cpu: 100m memory: 100Mi limits: - memory: 500Mi \ No newline at end of file + memory: 500Mi From fcc765f5ed9becdbb9172c7c0aad7d74fdc22d0b Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 22 Dec 2023 07:56:09 -0800 Subject: [PATCH 258/362] Upgrade jenkins (#2433) To 2.437 --- Docker/jenkins/Jenkins/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index 8b8c58406..94fcd3f12 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.434-jdk21 +FROM jenkins/jenkins:2.437-jdk21 USER root From fcfebe0a2da425f406b6a78afefcfe0ae1cc9691 Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 3 Jan 2024 06:35:58 -0800 Subject: [PATCH 259/362] Upgrade jenkins (#2435) --- Docker/jenkins/Jenkins/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index 94fcd3f12..7cce68b58 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM 
jenkins/jenkins:2.437-jdk21 +FROM jenkins/jenkins:2.439-jdk21 USER root From b859d3fc88c7b9c5e1a3dfda4c764a9cd16570b5 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Wed, 10 Jan 2024 12:00:25 -0600 Subject: [PATCH 260/362] PPS-588 add guppy csrf (#2430) * add guppy csrf * update msg --------- Co-authored-by: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> --- kube/services/revproxy/gen3.nginx.conf/guppy-service.conf | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf index db2de5886..e6d66ec12 100644 --- a/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf +++ b/kube/services/revproxy/gen3.nginx.conf/guppy-service.conf @@ -1,4 +1,8 @@ location /guppy/ { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check, make sure data-portal version >= 2023.12 or >= 5.19.0"; + } + proxy_connect_timeout 600s; proxy_send_timeout 600s; proxy_read_timeout 600s; From b7168ad9d2179a3d988d38bdccde3bc953ad714e Mon Sep 17 00:00:00 2001 From: Ajo Augustine Date: Tue, 16 Jan 2024 12:15:44 -0600 Subject: [PATCH 261/362] update service guppy memory limit (#2439) --- kube/services/guppy/guppy-deploy.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/guppy/guppy-deploy.yaml b/kube/services/guppy/guppy-deploy.yaml index 01a8905de..c3e8d121c 100644 --- a/kube/services/guppy/guppy-deploy.yaml +++ b/kube/services/guppy/guppy-deploy.yaml @@ -155,6 +155,6 @@ spec: resources: requests: cpu: 100m - memory: 128Mi + memory: 256Mi limits: - memory: 1200Mi + memory: 2000Mi From 1d594ce2d55842c2f295912858a6f3b5f8db09e8 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 19 Jan 2024 15:42:55 -0700 Subject: [PATCH 262/362] Update web_whitelist (add elastic.co) (#2442) --- files/squid_whitelist/web_whitelist | 1 + 1 file 
changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 0cbc0a6bc..83070d335 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -77,6 +77,7 @@ golang.org gopkg.in grafana.com grafana.github.io +helm.elastic.co http.us.debian.org ifconfig.io ingress.coralogix.us From 87704ae2a846d03903f58cfdc869dbd76e5d8aae Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 22 Jan 2024 14:39:51 -0700 Subject: [PATCH 263/362] VPC CNI Migration From Calico (#2440) * Adding "internet: yes" label This will allow the "netpolicy-external-egress" networkpolicy to apply to revproxy and allow for all egress traffic. * updating the aws vpc cni version in our "kube-setup-system-services" script * adding "sudo mount -t bpf bpffs /sys/fs/bpf" to Karpenter node templates as AWS VPC CNI addon requires it in order to manage networkpolicies --- gen3/bin/kube-setup-system-services.sh | 2 +- kube/services/karpenter/nodeTemplateDefault.yaml | 1 + kube/services/karpenter/nodeTemplateGPU.yaml | 1 + kube/services/karpenter/nodeTemplateJupyter.yaml | 1 + kube/services/karpenter/nodeTemplateWorkflow.yaml | 1 + kube/services/revproxy/revproxy-deploy.yaml | 1 + 6 files changed, 6 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index 609ee01c7..0afa7d586 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -19,7 +19,7 @@ gen3_load "gen3/gen3setup" kubeproxy=${kubeproxy:-1.24.7} coredns=${coredns:-1.8.7} kubednsautoscaler=${kubednsautoscaler:-1.8.6} -cni=${cni:-1.12.2} +cni=${cni:-1.14.1} calico=${calico:-1.7.8} diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index a3dbf6480..107c5e6cc 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ 
b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -37,6 +37,7 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + sudo mount -t bpf bpffs /sys/fs/bpf --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml index 5270b697f..c4fd535d7 100644 --- a/kube/services/karpenter/nodeTemplateGPU.yaml +++ b/kube/services/karpenter/nodeTemplateGPU.yaml @@ -37,6 +37,7 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + sudo mount -t bpf bpffs /sys/fs/bpf --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml index 74f24926a..bca4436d1 100644 --- a/kube/services/karpenter/nodeTemplateJupyter.yaml +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -37,6 +37,7 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + sudo mount -t bpf bpffs /sys/fs/bpf --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml index ec2b81a60..22c95aba1 100644 --- a/kube/services/karpenter/nodeTemplateWorkflow.yaml +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -37,6 +37,7 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" + sudo mount -t bpf bpffs /sys/fs/bpf --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index 9d5caab1b..9f10ce90b 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -21,6 +21,7 @@ spec: app: revproxy # allow access from workspaces userhelper: "yes" + internet: "yes" 
GEN3_DATE_LABEL spec: affinity: From 013ce31b4bb8eb49bbff2674f889d0ac28b6cc82 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Fri, 26 Jan 2024 12:50:15 -0500 Subject: [PATCH 264/362] This was still set to 5 for some reason (#2445) --- kube/services/argo/values.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 67fa05a09..473f7041e 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,5 +1,5 @@ controller: - parallelism: 5 + parallelism: 3 metricsConfig: # -- Enables prometheus metrics server enabled: true @@ -28,11 +28,11 @@ controller: } ] } - } + } resourceRateLimit: limit: 40 - burst: 4 + burst: 4 # -- enable persistence using postgres persistence: @@ -49,7 +49,7 @@ controller: port: 5432 database: GEN3_ARGO_DB_NAME tableName: argo_workflows - # # the database secrets must be in the same namespace of the controller + # # the database secrets must be in the same namespace of the controller userNameSecret: name: argo-db-creds key: db_username @@ -58,7 +58,7 @@ controller: key: db_password nodeStatusOffLoad: true - workflowDefaults: + workflowDefaults: spec: archiveLogs: true @@ -77,11 +77,11 @@ server: baseHref: "/argo/" # -- Extra arguments to provide to the Argo server binary, such as for disabling authentication. 
extraArgs: - - --auth-mode=server - - --auth-mode=client + - --auth-mode=server + - --auth-mode=client extraEnv: - - name: ARGO_HTTP1 - value: "true" + - name: ARGO_HTTP1 + value: "true" resources: requests: memory: 8Gi From 368eeb0f3046ae90f4c7ee01658fe30752daf9c9 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 31 Jan 2024 10:31:27 -0600 Subject: [PATCH 265/362] chore(karpenter-upgrade): Updraded karpenter to a supported version when running eks 1.25+ (#2450) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-karpenter.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 8ba8ed9d9..4dba4eb40 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -24,7 +24,9 @@ gen3_deploy_karpenter() { karpenter=$(g3k_config_lookup .global.karpenter_version) fi export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` - if [ "${clusterversion}" = "24+" ]; then + if [ "${clusterversion}" = "25+" ]; then + karpenter=${karpenter:-v0.27.0} + elif [ "${clusterversion}" = "24+" ]; then karpenter=${karpenter:-v0.24.0} else karpenter=${karpenter:-v0.22.0} From 12baa786e1771e0d4f5533844fee978fd80a51a8 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 31 Jan 2024 12:32:54 -0700 Subject: [PATCH 266/362] adding "migrate to vpc cni" script to cloud-auto (#2446) * adding "migrate to vpc cni" script to cloud-auto * updating the migrate to vpc script * changing to master branch * removing uneeded comments * Updated karpenter templates * adding changes --------- Co-authored-by: Edward Malinowski Co-authored-by: emalinowski --- gen3/bin/migrate-to-vpc-cni.sh | 138 ++++++++++++++++++ .../karpenter/nodeTemplateDefault.yaml | 8 +- kube/services/karpenter/nodeTemplateGPU.yaml | 7 +- .../karpenter/nodeTemplateJupyter.yaml | 7 +- .../karpenter/nodeTemplateWorkflow.yaml | 7 +- 5 files 
changed, 163 insertions(+), 4 deletions(-) create mode 100644 gen3/bin/migrate-to-vpc-cni.sh diff --git a/gen3/bin/migrate-to-vpc-cni.sh b/gen3/bin/migrate-to-vpc-cni.sh new file mode 100644 index 000000000..510d9ebef --- /dev/null +++ b/gen3/bin/migrate-to-vpc-cni.sh @@ -0,0 +1,138 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +#Get the K8s NS +ctx="$(g3kubectl config current-context)" +ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | .context.namespace")" + +# Set the cluster name variable +CLUSTER_NAME=`gen3 api environment` + +# Check if in default ns +if [[ ("$ctxNamespace" != "default" && "$ctxNamespace" != "null") ]]; then + gen3_log_err "Namespace must be default" + exit 1 +fi + +# Cd into Cloud-automation repo and pull the latest from master +gen3_log_info "Pulling the latest from Cloud-Auto" +cd /home/$CLUSTER_NAME/cloud-automation || { gen3_log_err "Cloud-automation repo not found"; exit 1; } +#### Change to master +git checkout master || { gen3_log_err "Failed to checkout master branch"; exit 1; } +git pull || { gen3_log_err "Failed to pull from the repository"; exit 1; } + +# Update the Karpenter Node Template +gen3_log_info "Apply new Karpenter Node Template" +if [[ -d $(g3k_manifest_init)/$(g3k_hostname)/manifests/karpenter ]]; then + gen3_log_info "Karpenter setup in manifest. Open a cdismanifest PR and add this line to aws node templates: https://github.com/uc-cdis/cloud-automation/blob/master/kube/services/karpenter/nodeTemplateDefault.yaml#L40" + while true; do + read -p "Have you updated your manifest? (yes/no): " yn + case $yn in + [Yy]* ) + gen3_log_info "Proceeding with Karpenter deployment..." + gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; } + break + ;; + [Nn]* ) + gen3_log_info "Please update the cdismanifest before proceeding." + exit 1 + ;; + * ) + gen3_log_info "Please answer yes or no." 
+ ;; + esac + done +else + gen3 kube-setup-karpenter deploy --force || { gen3_log_err "kube-setup-karpenter failed"; exit 1; } +fi + +# Cordon all the nodes before running gen3 roll all" +gen3_log_info "Cordoning all nodes" +kubectl get nodes --no-headers -o custom-columns=":metadata.name" | grep -v '^fargate' | xargs -I{} kubectl cordon {} + +# Run a "gen3 roll all" so all nodes use the new mounted BPF File System +gen3_log_info "Cycling all the nodes by running gen3 roll all" +gen3 roll all --fast || exit 1 + +# Confirm that all nodes have been rotated +while true; do + read -p "Roll all complete. Have all cordoned nodes been rotated? (yes/no): " yn + case $yn in + [Yy]* ) + gen3_log_info "Continuing with script..." + break + ;; + [Nn]* ) + gen3_log_info "Please drain any remaining nodes with 'kubectl drain --ignore-daemonsets --delete-emptydir-data'" + ;; + * ) + gen3_log_info "Please answer yes or no." + ;; + esac +done + + +# Delete all existing network policies +gen3_log_info "Deleting networkpolicies" +kubectl delete networkpolicies --all + +# Delete all Calico related resources from the “kube-system” namespace +gen3_log_info "Deleting all Calico related resources" +kubectl get deployments -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete deployment -n kube-system +kubectl get daemonsets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete daemonset -n kube-system +kubectl get services -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete service -n kube-system +kubectl get replicasets -n kube-system | grep calico | awk '{print $1}' | xargs kubectl delete replicaset -n kube-system + +# Backup the current VPC CNI configuration in case of rollback +gen3_log_info "Backing up current VPC CNI Configuration..." 
+kubectl get daemonset aws-node -n kube-system -o yaml > aws-k8s-cni-old.yaml || { gen3_log_err "Error backig up VPC CNI configuration"; exit 1; } + +# Check to ensure we are not using an AWS plugin to manage the VPC CNI Plugin +if aws eks describe-addon --cluster-name "$CLUSTER_NAME" --addon-name vpc-cni --query addon.addonVersion --output text 2>/dev/null; then + gen3_log_err "Error: VPC CNI Plugin is managed by AWS. Please log into the AWS UI and delete the VPC CNI Plugin in Amazon EKS, then re-run this script." + exit 1 +else + gen3_log_info "No managed VPC CNI Plugin found, proceeding with the script." +fi + +# Apply the new VPC CNI Version +gen3_log_info "Applying new version of VPC CNI" +g3kubectl apply -f https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v1.14.1/config/master/aws-k8s-cni.yaml || { gen3_log_err "Failed to apply new VPC CNI version"; exit 1; } + +# Check the version to make sure it updated +NEW_VERSION=$(kubectl describe daemonset aws-node --namespace kube-system | grep amazon-k8s-cni: | cut -d : -f 3) +gen3_log_info "Current version of aws-k8s-cni is: $NEW_VERSION" +if [ "$NEW_VERSION" != "v1.14.1" ]; then + gen3_log_info "The version of aws-k8s-cni has not been updated correctly." 
+ exit 1 +fi + +# Edit the amazon-vpc-cni configmap to enable network policy controller +gen3_log_info "Enabling NetworkPolicies in VPC CNI Configmap" +kubectl patch configmap -n kube-system amazon-vpc-cni --type merge -p '{"data":{"enable-network-policy-controller":"true"}}' || { gen3_log_err "Configmap patch failed"; exit 1; } + +# Edit the aws-node daemonset +gen3_log_info "Enabling NetworkPolicies in aws-node Daemonset" +kubectl patch daemonset aws-node -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/1/args", "value": ["--enable-network-policy=true", "--enable-ipv6=false", "--enable-cloudwatch-logs=false", "--metrics-bind-addr=:8162", "--health-probe-bind-addr=:8163"]}]' || { gen3_log_err "Daemonset edit failed"; exit 1; } + +# Ensure all the aws-nodes are running +kubectl get pods -n kube-system | grep aws +while true; do + read -p "Do all the aws-node pods in the kube-system ns have 2/2 containers running? (yes/no): " yn + case $yn in + [Yy]* ) + gen3_log_info "Running kube-setup-networkpolicy..." + gen3 kube-setup-networkpolicy || exit 1 + break + ;; + [Nn]* ) + gen3_log_err "Look at aws-node logs to figure out what went wrong. View this document for more details: https://docs.google.com/document/d/1fcBTciQSSwjvHktEnO_7EObY-xR_EvJ2NtgUa70wvL8" + gen3_log_info "Rollback instructions are also available in the above document" + ;; + * ) + gen3_log_info "Please answer yes or no." 
+ ;; + esac +done \ No newline at end of file diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index 107c5e6cc..114de5aba 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -37,11 +37,17 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - sudo mount -t bpf bpffs /sys/fs/bpf --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" + mounts: + - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + + --BOUNDARY + + Content-Type: text/cloud-config; charset="us-ascii" + power_state: delay: now mode: reboot diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml index c4fd535d7..cd3eb7386 100644 --- a/kube/services/karpenter/nodeTemplateGPU.yaml +++ b/kube/services/karpenter/nodeTemplateGPU.yaml @@ -37,7 +37,12 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - sudo mount -t bpf bpffs /sys/fs/bpf + + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" + + mounts: + - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml index bca4436d1..af2da8436 100644 --- a/kube/services/karpenter/nodeTemplateJupyter.yaml +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -37,7 +37,12 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - sudo mount -t bpf bpffs /sys/fs/bpf + + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" + + mounts: + - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git 
a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml index 22c95aba1..8609d0746 100644 --- a/kube/services/karpenter/nodeTemplateWorkflow.yaml +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -37,7 +37,12 @@ spec: sudo dracut -f # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - sudo mount -t bpf bpffs /sys/fs/bpf + + --BOUNDARY + Content-Type: text/cloud-config; charset="us-ascii" + + mounts: + - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" From c6358c90bf58ca2b38eeb290ef721f0ed40ae2dc Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 1 Feb 2024 12:59:21 -0800 Subject: [PATCH 267/362] Update jenkins version (#2455) --- Docker/jenkins/Jenkins/Dockerfile | 2 +- Docker/jenkins/Jenkins2/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Docker/jenkins/Jenkins/Dockerfile b/Docker/jenkins/Jenkins/Dockerfile index 7cce68b58..04ebe5864 100644 --- a/Docker/jenkins/Jenkins/Dockerfile +++ b/Docker/jenkins/Jenkins/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.439-jdk21 +FROM jenkins/jenkins:2.426.3-lts-jdk21 USER root diff --git a/Docker/jenkins/Jenkins2/Dockerfile b/Docker/jenkins/Jenkins2/Dockerfile index 9976a07c2..e6b73bc76 100644 --- a/Docker/jenkins/Jenkins2/Dockerfile +++ b/Docker/jenkins/Jenkins2/Dockerfile @@ -1,4 +1,4 @@ -FROM jenkins/jenkins:2.415-jdk11 +FROM jenkins/jenkins:2.426.3-lts-jdk21 USER root From 916d1ca7f7a47103eee518574c16a8aa2b2c1f7c Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Fri, 2 Feb 2024 12:35:42 -0600 Subject: [PATCH 268/362] update script (#2454) Co-authored-by: Michael Lukowski --- files/scripts/healdata/heal-cedar-data-ingest.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py 
b/files/scripts/healdata/heal-cedar-data-ingest.py index 4a7d88c3c..71575e3c5 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -24,11 +24,14 @@ "Questionnaire/Survey/Assessment - unvalidated instrument": "Questionnaire/Survey/Assessment", "Cis Male": "Male", "Cis Female": "Female", - "Trans Male": "Female-to-male transsexual", - "Trans Female": "Male-to-female transsexual", - "Agender, Non-binary, gender non-conforming": "Other", - "Gender Queer": "Other", - "Intersex": "Intersexed", + "Trans Male": "Transgender man/trans man/female-to-male (FTM)", + "Female-to-male transsexual": "Transgender man/trans man/female-to-male (FTM)", + "Trans Female": "Transgender woman/trans woman/male-to-female (MTF)", + "Male-to-female transsexual": "Transgender woman/trans woman/male-to-female (MTF)", + "Agender, Non-binary, gender non-conforming": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Gender Queer": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Intersex": "Genderqueer/gender nonconforming/neither exclusively male nor female", + "Intersexed": "Genderqueer/gender nonconforming/neither exclusively male nor female", "Buisness Development": "Business Development" } From f6ded7a98e76135657ef8b2e2c0e0455e571414d Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 5 Feb 2024 13:46:57 -0600 Subject: [PATCH 269/362] Chore/squid al23 (#2456) * chore(squid-al23): Updated squid bootstrap script to work with al23 * chore(squid-al23): Updated squid bootstrap script to work with al23 * chore(squid-al23): Updated squid bootstrap script to work with al23 * chore(squid-al23): Updated squid bootstrap script to work with al23 --------- Co-authored-by: Edward Malinowski --- flavors/squid_auto/squid_running_on_docker.sh | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/flavors/squid_auto/squid_running_on_docker.sh 
b/flavors/squid_auto/squid_running_on_docker.sh index 05607f304..7504cc35e 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -8,6 +8,9 @@ DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release) WORK_USER="ubuntu" if [[ $DISTRO == "Amazon Linux" ]]; then WORK_USER="ec2-user" + if [[ $(awk -F '[="]*' '/^VERSION_ID/ { print $2 }' < /etc/os-release) == "2023" ]]; then + DISTRO="al2023" + fi fi HOME_FOLDER="/home/${WORK_USER}" SUB_FOLDER="${HOME_FOLDER}/cloud-automation" @@ -201,8 +204,10 @@ function install_awslogs { if [[ $DISTRO == "Ubuntu" ]]; then wget ${AWSLOGS_DOWNLOAD_URL} -O amazon-cloudwatch-agent.deb dpkg -i -E ./amazon-cloudwatch-agent.deb - else + elif [[ $DISTRO == "Amazon Linux" ]]; then sudo yum install amazon-cloudwatch-agent nc -y + elif [[ $DISTRO == "al2023" ]]; then + sudo dnf install amazon-cloudwatch-agent nc -y fi # Configure the AWS logs @@ -292,6 +297,19 @@ function main(){ --volume ${SQUID_CACHE_DIR}:${SQUID_CACHE_DIR} \ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ quay.io/cdis/squid:${SQUID_IMAGE_TAG} + + max_attempts=3 + attempt_counter=0 + while [ $attempt_counter -lt $max_attempts ]; do + sleep 10 + if [[ -z "$(sudo lsof -i:3128)" ]]; then + echo "Squid not healthy, restarting." 
+ docker restart squid + else + echo "Squid healthy" + break + fi + done } main From f24a0dacc4053058d5bb949d5e21c938e68b82eb Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 6 Feb 2024 10:45:03 -0600 Subject: [PATCH 270/362] chore(squid-al23): Updated squid bootstrap script to work with al23 (#2457) Co-authored-by: Edward Malinowski --- flavors/squid_auto/squid_running_on_docker.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 7504cc35e..2b0f07e45 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -298,7 +298,7 @@ function main(){ --volume ${SQUID_CONFIG_DIR}:${SQUID_CONFIG_DIR}:ro \ quay.io/cdis/squid:${SQUID_IMAGE_TAG} - max_attempts=3 + max_attempts=10 attempt_counter=0 while [ $attempt_counter -lt $max_attempts ]; do sleep 10 @@ -307,7 +307,6 @@ function main(){ docker restart squid else echo "Squid healthy" - break fi done } From 57120c9e9fea0b79c411f3d88844fca8987fb11f Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 6 Feb 2024 12:26:06 -0600 Subject: [PATCH 271/362] chore(tigera-helm): Updated squid to support calico chart repo (#2458) Co-authored-by: Edward Malinowski --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 44f468097..3dca3946a 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -100,6 +100,7 @@ .sourceforge.net .southsideweekly.com .theanvil.io +.tigera.io .twistlock.com .ubuntu.com .ucsc.edu From 8c00a2306cba07ff3894aa4688ac14cbdfc6724e Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Tue, 6 Feb 2024 15:53:14 -0600 Subject: [PATCH 272/362] Update web_whitelist --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 83070d335..625c20b29 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -34,6 +34,7 @@ cernvm.cern.ch charts.bitnami.com charts.helm.sh cloud.r-project.org +coredns.github.io coreos.com covidstoplight.org cpan.mirrors.tds.net From 36ea4d58f7f16cb5a05a844dd6db545036994f4d Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 7 Feb 2024 10:09:09 -0600 Subject: [PATCH 273/362] fix(alb-policy): policy fix (#2459) Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-ingress.sh | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-ingress.sh b/gen3/bin/kube-setup-ingress.sh index d0bcff9a4..b75470f73 100644 --- a/gen3/bin/kube-setup-ingress.sh +++ b/gen3/bin/kube-setup-ingress.sh @@ -232,6 +232,28 @@ gen3_ingress_setup_role() { } } }, + { + "Effect": "Allow", + "Action": [ + "elasticloadbalancing:AddTags" + ], + "Resource": [ + "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*", + "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*" + ], + "Condition": { + "StringEquals": { + "elasticloadbalancing:CreateAction": [ + "CreateTargetGroup", + "CreateLoadBalancer" + ] + }, + "Null": { + "aws:RequestTag/elbv2.k8s.aws/cluster": "false" + } + } + }, { "Effect": "Allow", "Action": [ @@ -329,4 +351,4 @@ g3kubectl apply -f "${GEN3_HOME}/kube/services/revproxy/revproxy-service.yaml" envsubst <$scriptDir/ingress.yaml | g3kubectl apply -f - if [ "$deployWaf" = true ]; then gen3_ingress_setup_waf -fi \ No newline at end of file +fi From 301744b69a069cfddccd8a67fc77aa5362955dd0 Mon Sep 17 00:00:00 2001 From: EliseCastle23 
<109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 7 Feb 2024 10:51:02 -0700 Subject: [PATCH 274/362] removing "--short" flag as it is now deprecated (#2462) --- gen3/bin/kube-setup-karpenter.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 4dba4eb40..c8762c2e5 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -23,7 +23,7 @@ gen3_deploy_karpenter() { if g3k_config_lookup .global.karpenter_version; then karpenter=$(g3k_config_lookup .global.karpenter_version) fi - export clusterversion=`kubectl version --short -o json | jq -r .serverVersion.minor` + export clusterversion=`kubectl version -o json | jq -r .serverVersion.minor` if [ "${clusterversion}" = "25+" ]; then karpenter=${karpenter:-v0.27.0} elif [ "${clusterversion}" = "24+" ]; then From d01c0fa1fd8b612ab7b03c1d9a408e5e8bf36656 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Wed, 7 Feb 2024 10:53:02 -0700 Subject: [PATCH 275/362] reverting the BPF (#2461) --- kube/services/karpenter/nodeTemplateDefault.yaml | 8 ++++---- kube/services/karpenter/nodeTemplateGPU.yaml | 8 ++++---- kube/services/karpenter/nodeTemplateJupyter.yaml | 8 ++++---- kube/services/karpenter/nodeTemplateWorkflow.yaml | 8 ++++---- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/kube/services/karpenter/nodeTemplateDefault.yaml b/kube/services/karpenter/nodeTemplateDefault.yaml index 114de5aba..6ba8b3a0f 100644 --- a/kube/services/karpenter/nodeTemplateDefault.yaml +++ b/kube/services/karpenter/nodeTemplateDefault.yaml @@ -38,11 +38,11 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - --BOUNDARY - Content-Type: text/cloud-config; charset="us-ascii" + # --BOUNDARY + # Content-Type: text/cloud-config; charset="us-ascii" - mounts: - - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + 
# mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] --BOUNDARY diff --git a/kube/services/karpenter/nodeTemplateGPU.yaml b/kube/services/karpenter/nodeTemplateGPU.yaml index cd3eb7386..925e7a9a0 100644 --- a/kube/services/karpenter/nodeTemplateGPU.yaml +++ b/kube/services/karpenter/nodeTemplateGPU.yaml @@ -38,11 +38,11 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - --BOUNDARY - Content-Type: text/cloud-config; charset="us-ascii" + # --BOUNDARY + # Content-Type: text/cloud-config; charset="us-ascii" - mounts: - - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + # mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateJupyter.yaml b/kube/services/karpenter/nodeTemplateJupyter.yaml index af2da8436..1c8970ad6 100644 --- a/kube/services/karpenter/nodeTemplateJupyter.yaml +++ b/kube/services/karpenter/nodeTemplateJupyter.yaml @@ -38,11 +38,11 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - --BOUNDARY - Content-Type: text/cloud-config; charset="us-ascii" + # --BOUNDARY + # Content-Type: text/cloud-config; charset="us-ascii" - mounts: - - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + # mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" diff --git a/kube/services/karpenter/nodeTemplateWorkflow.yaml b/kube/services/karpenter/nodeTemplateWorkflow.yaml index 8609d0746..6e47b22f9 100644 --- a/kube/services/karpenter/nodeTemplateWorkflow.yaml +++ b/kube/services/karpenter/nodeTemplateWorkflow.yaml @@ -38,11 +38,11 @@ spec: # configure grub sudo /sbin/grubby --update-kernel=ALL --args="fips=1" - --BOUNDARY - Content-Type: text/cloud-config; charset="us-ascii" + # --BOUNDARY + # Content-Type: 
text/cloud-config; charset="us-ascii" - mounts: - - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] + # mounts: + # - ['fstype': 'bpf', 'mountpoint': '/sys/fs/bpf', 'opts': 'rw,relatime'] --BOUNDARY Content-Type: text/cloud-config; charset="us-ascii" From 21000363e7c3d562de45dfa7d07ffd24260a300d Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 7 Feb 2024 12:54:00 -0600 Subject: [PATCH 276/362] chore(remove-coredns-autoscaler): Removed dns autoscaler deployment (#2460) Co-authored-by: Edward Malinowski --- gen3/bin/kube-roll-all.sh | 2 +- gen3/bin/kube-setup-system-services.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index c9cec5a25..6a67f2bdd 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -274,7 +274,7 @@ if [[ "$GEN3_ROLL_FAST" != "true" ]]; then else gen3 kube-setup-autoscaler & fi - gen3 kube-setup-kube-dns-autoscaler & + #gen3 kube-setup-kube-dns-autoscaler & gen3 kube-setup-metrics deploy || true gen3 kube-setup-tiller || true # diff --git a/gen3/bin/kube-setup-system-services.sh b/gen3/bin/kube-setup-system-services.sh index 0afa7d586..c26a04cb5 100644 --- a/gen3/bin/kube-setup-system-services.sh +++ b/gen3/bin/kube-setup-system-services.sh @@ -39,7 +39,7 @@ calico_yaml="https://raw.githubusercontent.com/aws/amazon-vpc-cni-k8s/v${calico} g3kubectl set image daemonset.apps/kube-proxy -n kube-system kube-proxy=${kube_proxy_image} g3kubectl set image --namespace kube-system deployment.apps/coredns coredns=${coredns_image} -g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f - +#g3k_kv_filter "${GEN3_HOME}/kube/services/kube-dns-autoscaler/dns-horizontal-autoscaler.yaml" SERVICE "coredns" IMAGE "$kubednsautoscaler_image" | g3kubectl apply -f - g3kubectl apply -f ${cni_image} g3kubectl apply -f ${calico_yaml} From 
5f7aed08fa60ce1364c8016ff95ad9df6d34bfa1 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 7 Feb 2024 12:55:03 -0600 Subject: [PATCH 277/362] fix(squid-cron): Added crontab to al23 squid (#2463) Co-authored-by: Edward Malinowski --- flavors/squid_auto/squid_running_on_docker.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 2b0f07e45..557809b69 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -63,6 +63,8 @@ fi function install_basics(){ if [[ $DISTRO == "Ubuntu" ]]; then apt -y install atop + elif [[ $DISTRO == "al2023" ]]; then + sudo dnf install cronie nc -y fi } @@ -207,7 +209,7 @@ function install_awslogs { elif [[ $DISTRO == "Amazon Linux" ]]; then sudo yum install amazon-cloudwatch-agent nc -y elif [[ $DISTRO == "al2023" ]]; then - sudo dnf install amazon-cloudwatch-agent nc -y + sudo dnf install amazon-cloudwatch-agent -y fi # Configure the AWS logs From 0ebd73040e0c3eba67de10b85061a60c969890c9 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 9 Feb 2024 09:39:38 -0700 Subject: [PATCH 278/362] Update ingress.yaml to use newer fips based tls policy (#2447) --- kube/services/ingress/ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml index 65916679a..3f1f31259 100644 --- a/kube/services/ingress/ingress.yaml +++ b/kube/services/ingress/ingress.yaml @@ -11,7 +11,7 @@ metadata: alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600 alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' - alb.ingress.kubernetes.io/ssl-policy: 
ELBSecurityPolicy-TLS13-1-2-2021-06 + alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-Res-FIPS-2023-04 spec: ingressClassName: alb rules: From 1bf159a57d59dac52544c65fc01f057f53e0b0a7 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 9 Feb 2024 14:38:55 -0600 Subject: [PATCH 279/362] fix syntax (#2464) Co-authored-by: Edward Malinowski --- flavors/squid_auto/squid_running_on_docker.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 557809b69..94fe22122 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -303,6 +303,7 @@ function main(){ max_attempts=10 attempt_counter=0 while [ $attempt_counter -lt $max_attempts ]; do + ((attempt_counter++)) sleep 10 if [[ -z "$(sudo lsof -i:3128)" ]]; then echo "Squid not healthy, restarting." From 0f98195cf8b794b044c063b19281a39aca43b3ee Mon Sep 17 00:00:00 2001 From: emalinowski Date: Fri, 9 Feb 2024 15:32:21 -0600 Subject: [PATCH 280/362] prevent reboot (#2465) Co-authored-by: Edward Malinowski --- flavors/squid_auto/squid_running_on_docker.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 94fe22122..2d7cf8e68 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -303,7 +303,7 @@ function main(){ max_attempts=10 attempt_counter=0 while [ $attempt_counter -lt $max_attempts ]; do - ((attempt_counter++)) + #((attempt_counter++)) sleep 10 if [[ -z "$(sudo lsof -i:3128)" ]]; then echo "Squid not healthy, restarting." 
From 8e02b097b822927ae1c2768fa93b828e2150d732 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 12 Feb 2024 16:07:45 -0500 Subject: [PATCH 281/362] Feat/argo va testing revproxy modifications (#2466) * It can't be that easy * It wasn't that easy. What we're going to do is use the manifest to tell us what namespace to look in, and then look there * Dumb typo * Yet another dumb typo * Moving to a different setup, where specify the argo server URL in the manifest. This is more flexible * I'm blaming the last commit on Monday brain * It's a .conf file, not a yaml file * Need to update in both places --- gen3/bin/kube-setup-revproxy.sh | 17 ++++++++--------- ...o-workflows-server.conf => argo-server.conf} | 2 +- 2 files changed, 9 insertions(+), 10 deletions(-) rename kube/services/revproxy/gen3.nginx.conf/{argo-argo-workflows-server.conf => argo-server.conf} (86%) diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index fcc2ef3b7..5db9850a1 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -111,15 +111,14 @@ for name in $(g3kubectl get services -o json | jq -r '.items[] | .metadata.name' fi done -if g3kubectl get namespace argo > /dev/null 2>&1; -then - for argo in $(g3kubectl get services -n argo -o jsonpath='{.items[*].metadata.name}'); - do - filePath="$scriptDir/gen3.nginx.conf/${argo}.conf" - if [[ -f "$filePath" ]]; then - confFileList+=("--from-file" "$filePath") - fi - done + +if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then + argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url) + g3k_kv_filter "${scriptDir}/gen3.nginx.conf/argo-server.conf" SERVICE_URL "${argo_server_service_url}" > /tmp/argo-server-with-url.conf + filePath="/tmp/argo-server-with-url.conf" + if [[ -f "$filePath" ]]; then + confFileList+=("--from-file" "$filePath") + fi fi if g3kubectl get namespace argocd > /dev/null 2>&1; diff --git 
a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf similarity index 86% rename from kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf rename to kube/services/revproxy/gen3.nginx.conf/argo-server.conf index cb8def3aa..1cdd4608c 100644 --- a/kube/services/revproxy/gen3.nginx.conf/argo-argo-workflows-server.conf +++ b/kube/services/revproxy/gen3.nginx.conf/argo-server.conf @@ -7,7 +7,7 @@ auth_request /gen3-authz; set $proxy_service "argo"; - set $upstream http://argo-argo-workflows-server.argo.svc.cluster.local:2746; + set $upstream SERVICE_URL; rewrite ^/argo/(.*) /$1 break; From e081b2385cffdfb832063fbd7c3930f0310ed738 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 12 Feb 2024 17:02:16 -0600 Subject: [PATCH 282/362] fix(karpenter-policy): Added kms permissions for karpenter (#2467) * fix(karpenter-policy): Added kms permissions for karpenter * fix(karpenter-policy): Added kms permissions for karpenter * fix(karpenter-policy): Added kms permissions for karpenter --------- Co-authored-by: Edward Malinowski Co-authored-by: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> --- gen3/bin/kube-setup-karpenter.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index c8762c2e5..2737ed6ee 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -79,6 +79,14 @@ gen3_deploy_karpenter() { "Effect": "Allow", "Resource": "*", "Sid": "ConditionalEC2Termination" + }, + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": [ + "kms:*" + ], + "Resource": "*" } ], "Version": "2012-10-17" From 11b94fb4e86ac779bf855a451bd16c7e7ecbe5d1 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Tue, 13 Feb 2024 09:47:57 -0600 Subject: [PATCH 283/362] Feat/argo wrapper namespace config (#2468) * feat: argo-wrapper-namespace-config update * feat: some extra 
updates * fix: name for file * feat: updates --- gen3/bin/kube-setup-argo-wrapper.sh | 13 ++++++++++++- kube/services/argo-wrapper/argo-wrapper-deploy.yaml | 9 ++++++++- kube/services/argo-wrapper/config.ini | 4 ++++ 3 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 kube/services/argo-wrapper/config.ini diff --git a/gen3/bin/kube-setup-argo-wrapper.sh b/gen3/bin/kube-setup-argo-wrapper.sh index 5727a703e..306050b12 100644 --- a/gen3/bin/kube-setup-argo-wrapper.sh +++ b/gen3/bin/kube-setup-argo-wrapper.sh @@ -19,5 +19,16 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3 roll argo-wrapper g3kubectl apply -f "${GEN3_HOME}/kube/services/argo-wrapper/argo-wrapper-service.yaml" + if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then + argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url) + + export ARGO_HOST=${argo_server_service_url} + export ARGO_NAMESPACE=argo-$(gen3 db namespace) + envsubst <"${GEN3_HOME}/kube/services/argo-wrapper/config.ini" > /tmp/config.ini + + g3kubectl delete configmap argo-wrapper-namespace-config + g3kubectl create configmap argo-wrapper-namespace-config --from-file /tmp/config.ini + fi + gen3_log_info "the argo-wrapper service has been deployed onto the kubernetes cluster" -fi \ No newline at end of file +fi diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml index 65f68d98a..00d118746 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -58,7 +58,10 @@ spec: configMap: name: manifest-argo optional: true - + - name: argo-wrapper-namespace-config + configMap: + name: argo-wrapper-namespace-config + containers: - name: argo-wrapper GEN3_ARGO-WRAPPER_IMAGE @@ -70,3 +73,7 @@ spec: readOnly: true mountPath: /argo.json subPath: argo.json + - name: argo-wrapper-namespace-config + readOnly: true + mountPath: /src/config.ini + subPath: config.ini diff --git 
a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini new file mode 100644 index 000000000..334438ac2 --- /dev/null +++ b/kube/services/argo-wrapper/config.ini @@ -0,0 +1,4 @@ +[DEFAULT] +ARGO_ACCESS_METHOD = access +ARGO_HOST = $ARGO_HOST +ARGO_NAMESPACE = $ARGO_NAMESPACE From 464f6ebc68f1481c83597920ae411226cdff7ee0 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 14 Feb 2024 12:22:23 -0600 Subject: [PATCH 284/362] Update squid_running_on_docker.sh (#2469) --- flavors/squid_auto/squid_running_on_docker.sh | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/flavors/squid_auto/squid_running_on_docker.sh b/flavors/squid_auto/squid_running_on_docker.sh index 2d7cf8e68..812a9f738 100644 --- a/flavors/squid_auto/squid_running_on_docker.sh +++ b/flavors/squid_auto/squid_running_on_docker.sh @@ -74,10 +74,18 @@ function install_docker(){ # Docker ############################################################### # Install docker from sources - curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add - - add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable" - apt update - apt install -y docker-ce + if [[ $DISTRO == "Ubuntu" ]]; then + curl -fsSL ${DOCKER_DOWNLOAD_URL}/gpg | sudo apt-key add - + add-apt-repository "deb [arch=amd64] ${DOCKER_DOWNLOAD_URL} $(lsb_release -cs) stable" + apt update + apt install -y docker-ce + else + sudo yum update -y + sudo yum install -y docker + # Start and enable Docker service + sudo systemctl start docker + sudo systemctl enable docker + fi mkdir -p /etc/docker cp ${SUB_FOLDER}/flavors/squid_auto/startup_configs/docker-daemon.json /etc/docker/daemon.json chmod -R 0644 /etc/docker From 982e7b6fb1d7bf8fb1ba167e1adf2de7ba995a8b Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 15 Feb 2024 10:57:46 -0500 Subject: [PATCH 285/362] Lowering parallelism to 1 for VA workflows (#2470) * Lowering parallelism 
to 1 for VA workflows * Caught changes we don't want --- kube/services/argo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 473f7041e..7c2a04531 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,5 +1,5 @@ controller: - parallelism: 3 + parallelism: 1 metricsConfig: # -- Enables prometheus metrics server enabled: true From 0015d1c83ba3e9d0f9cbda1d7eb774e8a57ee68a Mon Sep 17 00:00:00 2001 From: vzpgb <45467497+vzpgb@users.noreply.github.com> Date: Fri, 16 Feb 2024 10:19:27 -0600 Subject: [PATCH 286/362] Increases VA prod parallel workflows to 3 (#2473) --- kube/services/argo/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 7c2a04531..473f7041e 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,5 +1,5 @@ controller: - parallelism: 1 + parallelism: 3 metricsConfig: # -- Enables prometheus metrics server enabled: true From f4e99c4c17f28bc4e4ad0bdd476f6b65b94ab183 Mon Sep 17 00:00:00 2001 From: burtonk <117617405+k-burt-uch@users.noreply.github.com> Date: Mon, 19 Feb 2024 15:08:18 -0600 Subject: [PATCH 287/362] Add azure blob storage to web_wildcard_whitelist (#2475) --- packer/configs/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/packer/configs/web_wildcard_whitelist b/packer/configs/web_wildcard_whitelist index c58eeefe8..621dec3d5 100644 --- a/packer/configs/web_wildcard_whitelist +++ b/packer/configs/web_wildcard_whitelist @@ -44,4 +44,5 @@ .yahooapis.com .cloudfront.net .docker.io +.blob.core.windows.net .googleapis.com From 1f8632ff493afad17e14d610eb35c01e1b81e359 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 19 Feb 2024 16:12:31 -0600 Subject: [PATCH 288/362] Add whitelist for snap to work (#2476) --- files/squid_whitelist/web_whitelist | 1 - files/squid_whitelist/web_wildcard_whitelist | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 625c20b29..c191b2e8c 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -15,7 +15,6 @@ ctds-planx.atlassian.net data.cityofchicago.org dataguids.org api.login.yahoo.com -api.snapcraft.io apt.kubernetes.io argoproj.github.io archive.cloudera.com diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 3dca3946a..b71ee76c2 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -97,6 +97,8 @@ .sks-keyservers.net .slack.com .slack-msgs.com +.snapcraft.io +.snapcraftcontent.com .sourceforge.net .southsideweekly.com .theanvil.io From fe5b5502603ec4158c9f077fad83eb421402763d Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 20 Feb 2024 14:56:05 -0600 Subject: [PATCH 289/362] gen3 logs snapshot: also get initContainers logs (#2478) --- gen3/lib/logs/snapshot.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/gen3/lib/logs/snapshot.sh b/gen3/lib/logs/snapshot.sh index 31cb80283..d3d3b2c6c 100644 --- a/gen3/lib/logs/snapshot.sh +++ b/gen3/lib/logs/snapshot.sh @@ -36,10 +36,11 @@ gen3_logs_snapshot_container() { # Snapshot all the pods # gen3_logs_snapshot_all() { + # For each pod for which we can list the containers, get the pod name and get its list of containers + # (container names + initContainers names). Diplay them as lines of " ". 
g3kubectl get pods -o json | \ - jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | map( {pod: .metadata.name, containers: .spec.containers | map(.name) } ) | map( .pod as $pod | .containers | map( { pod: $pod, cont: .})[]) | map(select(.cont != "pause" and .cont != "jupyterhub"))[] | .pod + " " + .cont' | \ + jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | map( {pod: .metadata.name, containers: [(.spec.containers | select(.!=null) | map(.name)), (.spec.initContainers | select(.!=null) | map(.name)) | add ] } ) | map( .pod as $pod | .containers | map( { pod: $pod, cont: .})[]) | map(select(.cont != "pause" and .cont != "jupyterhub"))[] | .pod + " " + .cont' | \ while read -r line; do gen3_logs_snapshot_container $line done } - From ff88b7b9ab6898587c96c78bb49ae6456cddabad Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 21 Feb 2024 11:46:29 -0500 Subject: [PATCH 290/362] Adding namespace and overall parallelism so we can run 5 workflows in preprod, and 3 in prod at the same time (#2479) --- kube/services/argo/values.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 473f7041e..2b46ced0f 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,5 +1,6 @@ controller: - parallelism: 3 + parallelism: 8 + namespaceParallelism: 3 metricsConfig: # -- Enables prometheus metrics server enabled: true From d8fd2813a1e13e40bdb58d33abd2a6101f398bba Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 22 Feb 2024 10:47:16 -0600 Subject: [PATCH 291/362] feat(argo-wrapper): update mountpath (#2481) --- kube/services/argo-wrapper/argo-wrapper-deploy.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml 
index 00d118746..89ec29ecc 100644 --- a/kube/services/argo-wrapper/argo-wrapper-deploy.yaml +++ b/kube/services/argo-wrapper/argo-wrapper-deploy.yaml @@ -75,5 +75,5 @@ spec: subPath: argo.json - name: argo-wrapper-namespace-config readOnly: true - mountPath: /src/config.ini + mountPath: /argowrapper/config.ini subPath: config.ini From 2df523111e44e3e1eb3ccc5563066ea1c13f4505 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 22 Feb 2024 11:03:28 -0600 Subject: [PATCH 292/362] feat(argo-wrapper): update config.ini, missed option (#2482) --- kube/services/argo-wrapper/config.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini index 334438ac2..1a1f20c29 100644 --- a/kube/services/argo-wrapper/config.ini +++ b/kube/services/argo-wrapper/config.ini @@ -2,3 +2,4 @@ ARGO_ACCESS_METHOD = access ARGO_HOST = $ARGO_HOST ARGO_NAMESPACE = $ARGO_NAMESPACE +COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={} From e51ed100e8b3b338b7029777a8daadf8b0e22002 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Thu, 22 Feb 2024 11:21:56 -0600 Subject: [PATCH 293/362] fix(argo-wrapper): fix typo (#2483) --- kube/services/argo-wrapper/config.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini index 1a1f20c29..40ac392fd 100644 --- a/kube/services/argo-wrapper/config.ini +++ b/kube/services/argo-wrapper/config.ini @@ -2,4 +2,4 @@ ARGO_ACCESS_METHOD = access ARGO_HOST = $ARGO_HOST ARGO_NAMESPACE = $ARGO_NAMESPACE -COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={} 
+COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={} From c5c54865cc2261bfd91b0d83cdbc50b8381254ef Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Fri, 23 Feb 2024 08:17:29 -0800 Subject: [PATCH 294/362] Prevent eviction of ETL job pod while running (#2484) --- kube/services/jobs/etl-job.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml index fa201c99a..6b9b887ec 100644 --- a/kube/services/jobs/etl-job.yaml +++ b/kube/services/jobs/etl-job.yaml @@ -2,6 +2,8 @@ apiVersion: batch/v1 kind: Job metadata: + annotations: + karpenter.sh/do-not-evict: "true" name: etl spec: backoffLimit: 0 From 13fb2397dfaac8165c7a71e1fce769bda08ee760 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Tue, 27 Feb 2024 11:51:18 -0500 Subject: [PATCH 295/362] Fix/argo wrapper default url (#2486) * Adding a secret to contain all values for any workflow templates * Adding an override namespace argument to kube-setup-argo * Fixing a typo * You'd think I'd never written Bash before * Adding the last part of the namespace override logic * Debug prints * Trying to figure out why the override namespace logic doesn't work * Just checking * Desperate * I think we can't use echo in gen3 scripts * Sanity check * Just a commit * Checking if we get the argo_namespace correctly * Making the print clearer * It's debug print time * Fixing references * Adding some stuff * Removing debugs, and adding --overwrite * Adding double quotes * Fixing kube-setup-argo-wrapper to have default values for argo-service URL and argo namespace * Fixing /tmp/config.ini collisions * Turns out that wasn't the way to go, instead we'll just delete /tmp/config.ini to account for multiple environments * Removed some extra stuff that snuck in --- .secrets.baseline | 2 +- gen3/bin/kube-setup-argo-wrapper.sh | 23 
++++++++++++++++------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 0a8fe9cc9..936e306e5 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2023-10-26T21:32:44Z", + "generated_at": "2024-02-23T20:30:41Z", "plugins_used": [ { "name": "AWSKeyDetector" diff --git a/gen3/bin/kube-setup-argo-wrapper.sh b/gen3/bin/kube-setup-argo-wrapper.sh index 306050b12..9f7cc52ce 100644 --- a/gen3/bin/kube-setup-argo-wrapper.sh +++ b/gen3/bin/kube-setup-argo-wrapper.sh @@ -18,17 +18,26 @@ if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3 roll argo-wrapper g3kubectl apply -f "${GEN3_HOME}/kube/services/argo-wrapper/argo-wrapper-service.yaml" + if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then - argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url) + export ARGO_HOST=$(g3k_manifest_lookup .argo.argo_server_service_url) + else + export ARGO_HOST="http://argo-argo-workflows-server.argo.svc.cluster.local:2746" + fi + + if g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json 2> /dev/null; then + export ARGO_NAMESPACE=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + else + export ARGO_NAMESPACE="argo" + fi - export ARGO_HOST=${argo_server_service_url} - export ARGO_NAMESPACE=argo-$(gen3 db namespace) - envsubst <"${GEN3_HOME}/kube/services/argo-wrapper/config.ini" > /tmp/config.ini + envsubst <"${GEN3_HOME}/kube/services/argo-wrapper/config.ini" > /tmp/config.ini - g3kubectl delete configmap argo-wrapper-namespace-config - g3kubectl create configmap argo-wrapper-namespace-config --from-file /tmp/config.ini - fi + g3kubectl delete configmap argo-wrapper-namespace-config + g3kubectl create configmap argo-wrapper-namespace-config --from-file /tmp/config.ini + + rm /tmp/config.ini gen3_log_info "the argo-wrapper 
service has been deployed onto the kubernetes cluster" fi From 1a44bef0712ca5ffb4262615999787ef536644be Mon Sep 17 00:00:00 2001 From: emalinowski Date: Wed, 28 Feb 2024 06:20:07 -0600 Subject: [PATCH 296/362] fix(jenkins-zone): Added zone (#2487) Co-authored-by: Edward Malinowski Co-authored-by: Ajo Augustine --- kube/services/jenkins/jenkins-deploy.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kube/services/jenkins/jenkins-deploy.yaml b/kube/services/jenkins/jenkins-deploy.yaml index c0eae2040..954e996f2 100644 --- a/kube/services/jenkins/jenkins-deploy.yaml +++ b/kube/services/jenkins/jenkins-deploy.yaml @@ -38,6 +38,10 @@ spec: operator: In values: - on-demand + - key: topology.kubernetes.io/zone + operator: In + values: + - us-east-1a serviceAccountName: jenkins-service securityContext: runAsUser: 1000 From 6c27fc9a7fe6cd3affd7a108db3c80b51c664ad1 Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Thu, 29 Feb 2024 09:59:10 -0600 Subject: [PATCH 297/362] updating the cedar data ingest (#2472) * updating the cedar data ingest * fix unsanitized request * address some comments --- .../healdata/heal-cedar-data-ingest.py | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 71575e3c5..d3ff246c3 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -35,6 +35,16 @@ "Buisness Development": "Business Development" } +# repository links +REPOSITORY_STUDY_ID_LINK_TEMPLATE = { + "NIDDK Central": "https://repository.niddk.nih.gov/studies//", + "NIDA Data Share": "https://datashare.nida.nih.gov/study/", + "NICHD DASH": "https://dash.nichd.nih.gov/study/", + "ICPSR": "https://www.icpsr.umich.edu/web/ICPSR/studies/", + "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy//", +} + + # Defines field that we don't want to include in the filters OMITTED_VALUES_MAPPING = { 
"study_metadata.human_subject_applicability.gender_applicability": "Not applicable" @@ -114,6 +124,31 @@ def get_client_token(client_id: str, client_secret: str): return token +def get_related_studies(serial_num, hostname): + related_study_result = [] + + if serial_num: + mds = requests.get(f"https://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000") + if mds.status_code == 200: + related_study_metadata = mds.json() + + for ( + related_study_metadata_key, + related_study_metadata_value, + ) in related_study_metadata.items(): + title = ( + related_study_metadata_value.get( + "gen3_discovery", {} + ) + .get("study_metadata", {}) + .get("minimal_info", {}) + .get("study_name", "") + ) + link = f"https://{hostname}/portal/discovery/{related_study_metadata_key}/" + related_study_result.append({"title": title, "link": link}) + return related_study_result + + parser = argparse.ArgumentParser() parser.add_argument("--directory", help="CEDAR Directory ID for registering ") @@ -214,6 +249,67 @@ def get_client_token(client_id: str, client_secret: str): mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) mds_res["gen3_discovery"]["study_metadata"]["metadata_location"]["other_study_websites"] = cedar_record_other_study_websites + # setup citations + doi_citation = mds_res["gen3_discovery"]["study_metadata"].get("doi_citation", "") + mds_res["gen3_discovery"]["study_metadata"]["citation"]["heal_platform_citation"] = doi_citation + + + # setup repository_study_link + data_repositories = ( + mds_res.get("study_metadata", {}) + .get("metadata_location", {}) + .get("data_repositories", []) + ) + repository_citation = "Users must also include a citation to the data as specified by the local repository." + repository_citation_additional_text = ' The link to the study page at the local repository can be found in the "Data" tab.' 
+ for repository in data_repositories: + if ( + repository["repository_name"] + and repository["repository_name"] + in REPOSITORY_STUDY_ID_LINK_TEMPLATE + and repository["repository_study_ID"] + ): + repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[ + repository["repository_name"] + ].replace("", repository["repository_study_ID"]) + repository.update({"repository_study_link": repository_study_link}) + if repository_citation_additional_text not in repository_citation: + repository_citation += repository_citation_additional_text + if len(data_repositories): + data_repositories[0] = { + **data_repositories[0], + "repository_citation": repository_citation, + } + mds_res["gen3_discovery"]["study_metadata"][ + "metadata_location" + ]["data_repositories"] = data_repositories + + + + # set up related studies + serial_num = None + try: + serial_num = ( + mds_res + .get("nih_reporter", {}) + .get("project_num_split", {}) + .get("serial_num", None) + ) + except Exception: + print(f"Unable to get serial number for study") + + if serial_num == None: + print(f"Unable to get serial number for study") + + related_study_result = get_related_studies(serial_num, hostname) + existing_related_study_result = mds_res.get("related_studies", []) + for related_study in related_study_result: + if related_study not in existing_related_study_result: + existing_related_study_result.append(copy.deepcopy(related_study)) + mds_res["gen3_discovery"][ + "related_studies" + ] = copy.deepcopy(existing_related_study_result) + # merge data from cedar that is not study level metadata into a level higher deleted_keys = [] for key, value in mds_res["gen3_discovery"]["study_metadata"].items(): From 072279d59732ff0109e9ca18bb05479b3bdcd577 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Thu, 29 Feb 2024 11:16:29 -0500 Subject: [PATCH 298/362] Feat/s3 bucket template (#2485) * Adding a secret to contain all values for any workflow templates * Adding an 
override namespace argument to kube-setup-argo * Fixing a typo * You'd think I'd never written Bash before * Adding the last part of the namespace override logic * Debug prints * Trying to figure out why the override namespace logic doesn't work * Just checking * Desperate * I think we can't use echo in gen3 scripts * Sanity check * Just a commit * Checking if we get the argo_namespace correctly * Making the print clearer * It's debug print time * Fixing references * Adding some stuff * Removing debugs, and adding --overwrite * Adding double quotes --- .secrets.baseline | 4 +-- gen3/bin/kube-setup-argo.sh | 66 ++++++++++++++++++++++++++----------- 2 files changed, 49 insertions(+), 21 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 936e306e5..b7e06622d 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2024-02-23T20:30:41Z", + "generated_at": "2024-02-23T20:30:41Z" "plugins_used": [ { "name": "AWSKeyDetector" @@ -342,7 +342,7 @@ "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", "is_secret": false, "is_verified": false, - "line_number": 191, + "line_number": 206, "type": "Secret Keyword" } ], diff --git a/gen3/bin/kube-setup-argo.sh b/gen3/bin/kube-setup-argo.sh index ff2438833..20676145b 100644 --- a/gen3/bin/kube-setup-argo.sh +++ b/gen3/bin/kube-setup-argo.sh @@ -5,10 +5,25 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" gen3_load "gen3/lib/kube-setup-init" +override_namespace=false +force=false + +for arg in "${@}"; do + if [ "$arg" == "--override-namespace" ]; then + override_namespace=true + elif [ "$arg" == "--force" ]; then + force=true + else + #Print usage info and exit + gen3_log_info "Usage: gen3 kube-setup-argo [--override-namespace] [--force]" + exit 1 + fi +done ctx="$(g3kubectl config current-context)" ctxNamespace="$(g3kubectl config view -ojson | jq -r ".contexts | map(select(.name==\"$ctx\")) | .[0] | 
.context.namespace")" +argo_namespace=$(g3k_config_lookup '.argo_namespace' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) function setup_argo_buckets { local accountNumber @@ -32,13 +47,13 @@ function setup_argo_buckets { roleName="gen3-argo-${environment//_/-}-role" bucketPolicy="argo-bucket-policy-${nameSpace}" internalBucketPolicy="argo-internal-bucket-policy-${nameSpace}" - if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."s3-bucket"') ]]; then - if [[ ! -z $(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then + if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! -z $(g3k_config_lookup '.argo."downloadable-s3-bucket"') ]]; then + if [[ ! -z $(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) ]]; then gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" - bucketName=$(g3k_config_lookup '."s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + bucketName=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) else gen3_log_info "Using S3 bucket found in manifest: ${bucketName}" - bucketName=$(g3k_config_lookup '.argo."s3-bucket"') + bucketName=$(g3k_config_lookup '.argo."downloadable-s3-bucket"') fi fi if [[ ! -z $(g3k_config_lookup '."internal-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) || ! 
-z $(g3k_config_lookup '.argo."internal-s3-bucket"') ]]; then @@ -131,19 +146,19 @@ EOF g3kubectl create namespace argo || true g3kubectl label namespace argo app=argo || true # Grant admin access within the argo namespace to the default SA in the argo namespace - g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n argo || true + g3kubectl create rolebinding argo-admin --clusterrole=admin --serviceaccount=argo:default -n $argo_namespace || true fi gen3_log_info "Creating IAM role ${roleName}" if aws iam get-role --role-name "${roleName}" > /dev/null 2>&1; then gen3_log_info "IAM role ${roleName} already exists.." roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) gen3_log_info "Role annotate" - g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n argo - g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} -n $nameSpace + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $argo_namespace + g3kubectl annotate serviceaccount argo eks.amazonaws.com/role-arn=${roleArn} --overwrite -n $nameSpace else gen3 awsrole create $roleName argo $nameSpace -f all_namespaces roleArn=$(aws iam get-role --role-name "${roleName}" --query 'Role.Arn' --output text) - g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n argo + g3kubectl annotate serviceaccount default eks.amazonaws.com/role-arn=${roleArn} -n $argo_namespace fi # Grant admin access within the current namespace to the argo SA in the current namespace @@ -177,34 +192,47 @@ EOF for serviceName in indexd; do secretName="${serviceName}-creds" # Only delete if secret is found to prevent early exits - if [[ ! -z $(g3kubectl get secrets -n argo | grep $secretName) ]]; then - g3kubectl delete secret "$secretName" -n argo > /dev/null 2>&1 + if [[ ! 
-z $(g3kubectl get secrets -n $argo_namespace | grep $secretName) ]]; then + g3kubectl delete secret "$secretName" -n $argo_namespace > /dev/null 2>&1 fi done sleep 1 # I think delete is async - give backend a second to finish indexdFencePassword=$(cat $(gen3_secrets_folder)/creds.json | jq -r .indexd.user_db.$indexd_admin_user) - g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n argo + g3kubectl create secret generic "indexd-creds" --from-literal=user=$indexd_admin_user --from-literal=password=$indexdFencePassword -n $argo_namespace fi } function setup_argo_db() { - if ! secret="$(g3kubectl get secret argo-db-creds -n argo 2> /dev/null)"; then + if ! secret="$(g3kubectl get secret argo-db-creds -n $argo_namespace 2> /dev/null)"; then gen3_log_info "Setting up argo db persistence" gen3 db setup argo || true dbCreds=$(gen3 secrets decode argo-g3auto dbcreds.json) - g3kubectl create secret -n argo generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database) + g3kubectl create secret -n $argo_namespace generic argo-db-creds --from-literal=db_host=$(echo $dbCreds | jq -r .db_host) --from-literal=db_username=$(echo $dbCreds | jq -r .db_username) --from-literal=db_password=$(echo $dbCreds | jq -r .db_password) --from-literal=db_database=$(echo $dbCreds | jq -r .db_database) else gen3_log_info "Argo DB setup already completed" fi } - setup_argo_buckets +function setup_argo_template_secret() { + gen3_log_info "Started the template secret process" + downloadable_bucket_name=$(g3k_config_lookup '."downloadable-s3-bucket"' $(g3k_manifest_init)/$(g3k_hostname)/manifests/argo/argo.json) + # Check if the secret already exists + if [[ ! 
-z $(g3kubectl get secret argo-template-values-secret -n $argo_namespace) ]]; then + gen3_log_info "Argo template values secret already exists, assuming it's stale and deleting" + g3kubectl delete secret argo-template-values-secret -n $argo_namespace + fi + gen3_log_info "Creating argo template values secret" + g3kubectl create secret generic argo-template-values-secret --from-literal=DOWNLOADABLE_BUCKET=$downloadable_bucket_name -n $argo_namespace +} + +setup_argo_buckets # only do this if we are running in the default namespace -if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then +if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" || "$override_namespace" == true ]]; then setup_argo_db - if (! helm status argo -n argo > /dev/null 2>&1 ) || [[ "$1" == "--force" ]]; then - DBHOST=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_host | base64 -d) - DBNAME=$(kubectl get secrets -n argo argo-db-creds -o json | jq -r .data.db_database | base64 -d) + setup_argo_template_secret + if (! 
helm status argo -n $argo_namespace > /dev/null 2>&1 ) || [[ "$force" == true ]]; then + DBHOST=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_host | base64 -d) + DBNAME=$(kubectl get secrets -n $argo_namespace argo-db-creds -o json | jq -r .data.db_database | base64 -d) if [[ -z $internalBucketName ]]; then BUCKET=$bucketName else @@ -218,7 +246,7 @@ if [[ "$ctxNamespace" == "default" || "$ctxNamespace" == "null" ]]; then helm repo add argo https://argoproj.github.io/argo-helm --force-update 2> >(grep -v 'This is insecure' >&2) helm repo update 2> >(grep -v 'This is insecure' >&2) - helm upgrade --install argo argo/argo-workflows -n argo -f ${valuesFile} --version 0.29.1 + helm upgrade --install argo argo/argo-workflows -n $argo_namespace -f ${valuesFile} --version 0.29.1 else gen3_log_info "kube-setup-argo exiting - argo already deployed, use --force to redeploy" fi From 90b66091c0d1d8b89e7182ad84201e99903d8e44 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Thu, 29 Feb 2024 13:29:05 -0600 Subject: [PATCH 299/362] Update heal-cedar-data-ingest.py (#2490) --- files/scripts/healdata/heal-cedar-data-ingest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index d3ff246c3..1da4ac2d5 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -128,7 +128,7 @@ def get_related_studies(serial_num, hostname): related_study_result = [] if serial_num: - mds = requests.get(f"https://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000") + mds = requests.get(f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000") if mds.status_code == 200: related_study_metadata = mds.json() From 
bd6bc767c0461f8fc1f1d90fa8a755be1e7fc381 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Thu, 29 Feb 2024 13:49:47 -0600 Subject: [PATCH 300/362] fix missed import (#2491) --- .secrets.baseline | 346 ++++-------------- .../healdata/heal-cedar-data-ingest.py | 2 +- 2 files changed, 73 insertions(+), 275 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index b7e06622d..2583e269f 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -1,9 +1,9 @@ { "exclude": { - "files": "^.secrets.baseline$", + "files": null, "lines": null }, - "generated_at": "2024-02-23T20:30:41Z" + "generated_at": "2024-02-29T19:38:46Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -61,14 +61,12 @@ "Chef/repo/data_bags/README.md": [ { "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", - "is_secret": false, "is_verified": false, "line_number": 45, "type": "Secret Keyword" }, { "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", - "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -77,25 +75,22 @@ "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, - "line_number": 121, + "line_number": 124, "type": "Secret Keyword" } ], "Docker/jenkins/Jenkins-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, - "line_number": 143, + "line_number": 139, "type": "Secret Keyword" } ], "Docker/jenkins/Jenkins/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 107, "type": "Secret Keyword" @@ -104,7 +99,6 @@ "Docker/jenkins/Jenkins2/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 108, "type": "Secret Keyword" @@ -113,7 +107,6 @@ 
"Docker/sidecar/service.key": [ { "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", - "is_secret": false, "is_verified": false, "line_number": 1, "type": "Private Key" @@ -122,7 +115,6 @@ "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -131,7 +123,6 @@ "ansible/roles/awslogs/defaults/main.yaml": [ { "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Basic Auth Credentials" @@ -140,14 +131,12 @@ "ansible/roles/slurm/README.md": [ { "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Base64 High Entropy String" }, { "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" @@ -156,7 +145,6 @@ "apis_configs/config_helper.py": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, "is_verified": false, "line_number": 66, "type": "Basic Auth Credentials" @@ -165,7 +153,6 @@ "apis_configs/fence_credentials.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -174,21 +161,18 @@ "apis_configs/fence_settings.py": [ { "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Basic Auth Credentials" }, { "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", - "is_secret": false, "is_verified": false, "line_number": 58, "type": "Secret Keyword" }, { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 80, "type": "Basic Auth Credentials" @@ -197,7 +181,6 @@ 
"apis_configs/indexd_settings.py": [ { "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Basic Auth Credentials" @@ -206,7 +189,6 @@ "apis_configs/peregrine_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -215,7 +197,6 @@ "apis_configs/sheepdog_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -224,7 +205,6 @@ "doc/Gen3-data-upload.md": [ { "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -233,7 +213,6 @@ "doc/api.md": [ { "hashed_secret": "625de83a7517422051911680cc803921ff99db90", - "is_secret": false, "is_verified": false, "line_number": 47, "type": "Hex High Entropy String" @@ -242,28 +221,24 @@ "doc/gen3OnK8s.md": [ { "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", - "is_secret": false, "is_verified": false, "line_number": 113, "type": "Secret Keyword" }, { "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", - "is_secret": false, "is_verified": false, "line_number": 143, "type": "Secret Keyword" }, { "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", - "is_secret": false, "is_verified": false, "line_number": 170, "type": "Secret Keyword" }, { "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", - "is_secret": false, "is_verified": false, "line_number": 189, "type": "Secret Keyword" @@ -272,7 +247,6 @@ "doc/kube-setup-data-ingestion-job.md": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Secret Keyword" @@ -281,7 +255,6 @@ "doc/logs.md": [ { "hashed_secret": 
"9addbf544119efa4a64223b649750a510f0d463f", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Secret Keyword" @@ -290,7 +263,6 @@ "doc/slurm_cluster.md": [ { "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", - "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" @@ -299,14 +271,12 @@ "files/dashboard/usage-reports/package-lock.json": [ { "hashed_secret": "e095101882f706c4de95e0f75c5bcb9666e3f448", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "5422e4f96964d5739998b25ac214520c1b113e5b", - "is_secret": false, "is_verified": false, "line_number": 15, "type": "Base64 High Entropy String" @@ -315,14 +285,12 @@ "gen3/bin/api.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 407, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 477, "type": "Secret Keyword" @@ -331,7 +299,6 @@ "gen3/bin/kube-dev-namespace.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 135, "type": "Secret Keyword" @@ -340,7 +307,6 @@ "gen3/bin/kube-setup-argo.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 206, "type": "Secret Keyword" @@ -349,7 +315,6 @@ "gen3/bin/kube-setup-aurora-monitoring.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Secret Keyword" @@ -358,7 +323,6 @@ "gen3/bin/kube-setup-certs.sh": [ { "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", - "is_secret": false, "is_verified": false, "line_number": 50, "type": "Secret Keyword" @@ -367,14 +331,12 @@ "gen3/bin/kube-setup-dashboard.sh": [ { "hashed_secret": 
"40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 40, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 41, "type": "Secret Keyword" @@ -383,14 +345,12 @@ "gen3/bin/kube-setup-data-ingestion-job.sh": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" }, { "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", - "is_secret": false, "is_verified": false, "line_number": 102, "type": "Secret Keyword" @@ -399,7 +359,6 @@ "gen3/bin/kube-setup-dicom-server.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 43, "type": "Secret Keyword" @@ -408,7 +367,6 @@ "gen3/bin/kube-setup-dicom.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -417,14 +375,26 @@ "gen3/bin/kube-setup-jenkins.sh": [ { "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "is_verified": false, + "line_number": 22, + "type": "Secret Keyword" + } + ], + "gen3/bin/kube-setup-jenkins2.sh": [ + { + "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", + "is_verified": false, + "line_number": 18, + "type": "Secret Keyword" + }, + { + "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", "is_verified": false, "line_number": 22, "type": "Secret Keyword" @@ -433,7 +403,6 @@ "gen3/bin/kube-setup-metadata.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 35, "type": "Secret Keyword" @@ -442,21 +411,18 
@@ "gen3/bin/kube-setup-revproxy.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 38, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 55, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 57, "type": "Secret Keyword" @@ -465,21 +431,18 @@ "gen3/bin/kube-setup-secrets.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 79, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 82, "type": "Secret Keyword" }, { "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", - "is_secret": false, "is_verified": false, "line_number": 95, "type": "Secret Keyword" @@ -488,14 +451,12 @@ "gen3/bin/kube-setup-sftp.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 36, "type": "Secret Keyword" }, { "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", - "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -504,7 +465,6 @@ "gen3/bin/kube-setup-sheepdog.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 33, "type": "Secret Keyword" @@ -513,28 +473,24 @@ "gen3/bin/kube-setup-sower-jobs.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": 
"d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 120, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 122, "type": "Secret Keyword" @@ -543,21 +499,18 @@ "gen3/bin/kube-setup-ssjdispatcher.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 117, "type": "Secret Keyword" }, { "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93", - "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 197, "type": "Secret Keyword" @@ -566,14 +519,12 @@ "gen3/lib/aws.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 640, "type": "Secret Keyword" }, { "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", - "is_secret": false, "is_verified": false, "line_number": 660, "type": "Secret Keyword" @@ -582,14 +533,12 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", - "is_secret": false, "is_verified": false, "line_number": 33, "type": "Basic Auth Credentials" }, { "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", - "is_secret": false, "is_verified": false, "line_number": 286, "type": "Secret Keyword" @@ -598,7 +547,6 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -607,7 +555,6 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": 
false, "line_number": 5, "type": "Secret Keyword" @@ -616,7 +563,6 @@ "gen3/lib/logs/utils.sh": [ { "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", - "is_secret": false, "is_verified": false, "line_number": 3, "type": "Secret Keyword" @@ -625,7 +571,6 @@ "gen3/lib/manifestDefaults/hatchery/hatchery.json": [ { "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -634,14 +579,12 @@ "gen3/lib/onprem.sh": [ { "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", - "is_secret": false, "is_verified": false, "line_number": 68, "type": "Secret Keyword" }, { "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", - "is_secret": false, "is_verified": false, "line_number": 84, "type": "Secret Keyword" @@ -650,14 +593,12 @@ "gen3/lib/secrets/rotate-postgres.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 250, "type": "Secret Keyword" @@ -666,49 +607,42 @@ "gen3/lib/testData/etlconvert/expected2.yaml": [ { "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", - "is_secret": false, "is_verified": false, "line_number": 13, "type": "Base64 High Entropy String" }, { "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", - "is_secret": false, "is_verified": false, "line_number": 16, "type": "Base64 High Entropy String" }, { "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", - 
"is_secret": false, "is_verified": false, "line_number": 33, "type": "Base64 High Entropy String" }, { "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", - "is_secret": false, "is_verified": false, "line_number": 35, "type": "Base64 High Entropy String" }, { "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", - "is_secret": false, "is_verified": false, "line_number": 36, "type": "Base64 High Entropy String" @@ -717,7 +651,6 @@ "gen3/test/secretsTest.sh": [ { "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", - "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" @@ -726,28 +659,24 @@ "gen3/test/terraformTest.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 156, "type": "Secret Keyword" }, { "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", - "is_secret": false, "is_verified": false, "line_number": 172, "type": "Base64 High Entropy String" }, { "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", - "is_secret": false, "is_verified": false, "line_number": 175, "type": "Base64 High Entropy String" }, { "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", - "is_secret": false, "is_verified": false, "line_number": 175, "type": "Secret Keyword" @@ -756,21 +685,18 @@ "kube/services/argocd/values.yaml": [ { "hashed_secret": "27c6929aef41ae2bcadac15ca6abcaff72cda9cd", - "is_secret": false, "is_verified": false, "line_number": 360, "type": "Private Key" }, { "hashed_secret": "edbd5e119f94badb9f99a67ac6ff4c7a5204ad61", - "is_secret": false, "is_verified": false, "line_number": 379, "type": "Secret Keyword" }, { "hashed_secret": "91dfd9ddb4198affc5c194cd8ce6d338fde470e2", - "is_secret": false, "is_verified": false, "line_number": 412, "type": "Secret Keyword" @@ -779,7 +705,6 @@ "kube/services/datadog/values.yaml": [ { "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", - "is_secret": false, 
"is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -788,401 +713,362 @@ "kube/services/fenceshib/fenceshib-configmap.yaml": [ { "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", - "is_secret": false, "is_verified": false, "line_number": 375, "type": "Base64 High Entropy String" }, { "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", - "is_secret": false, "is_verified": false, "line_number": 376, "type": "Base64 High Entropy String" }, { "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", - "is_secret": false, "is_verified": false, "line_number": 377, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", - "is_secret": false, "is_verified": false, "line_number": 378, "type": "Base64 High Entropy String" }, { "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", - "is_secret": false, "is_verified": false, "line_number": 379, "type": "Base64 High Entropy String" }, { "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", - "is_secret": false, "is_verified": false, "line_number": 380, "type": "Base64 High Entropy String" }, { "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12", - "is_secret": false, "is_verified": false, "line_number": 381, "type": "Base64 High Entropy String" }, { "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", - "is_secret": false, "is_verified": false, "line_number": 382, "type": "Base64 High Entropy String" }, { "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", - "is_secret": false, "is_verified": false, "line_number": 383, "type": "Base64 High Entropy String" }, { "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3", - "is_secret": false, "is_verified": false, "line_number": 384, "type": "Base64 High Entropy String" }, { "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", - "is_secret": false, "is_verified": false, "line_number": 385, "type": "Base64 High Entropy String" }, { "hashed_secret": 
"823131319b4c4b4688f44d3e832bfa9696f16b52", - "is_secret": false, "is_verified": false, "line_number": 386, "type": "Base64 High Entropy String" }, { "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", - "is_secret": false, "is_verified": false, "line_number": 387, "type": "Base64 High Entropy String" }, { "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", - "is_secret": false, "is_verified": false, "line_number": 388, "type": "Base64 High Entropy String" }, { "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", - "is_secret": false, "is_verified": false, "line_number": 389, "type": "Base64 High Entropy String" }, { "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", - "is_secret": false, "is_verified": false, "line_number": 390, "type": "Base64 High Entropy String" }, { "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", - "is_secret": false, "is_verified": false, "line_number": 391, "type": "Base64 High Entropy String" }, { "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", - "is_secret": false, "is_verified": false, "line_number": 392, "type": "Base64 High Entropy String" }, { "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", - "is_secret": false, "is_verified": false, "line_number": 393, "type": "Base64 High Entropy String" }, { "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", - "is_secret": false, "is_verified": false, "line_number": 394, "type": "Base64 High Entropy String" }, { "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", - "is_secret": false, "is_verified": false, "line_number": 395, "type": "Base64 High Entropy String" }, { "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", - "is_secret": false, "is_verified": false, "line_number": 396, "type": "Base64 High Entropy String" }, { "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", - "is_secret": false, "is_verified": false, "line_number": 397, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", - "is_secret": false, "is_verified": false, "line_number": 398, "type": "Base64 High Entropy String" }, { "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", - "is_secret": false, "is_verified": false, "line_number": 399, "type": "Base64 High Entropy String" }, { - "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165", - "is_secret": false, + "hashed_secret": "fbad0bc8f7792b03f89cd3780eb7cf79f284c525", "is_verified": false, "line_number": 419, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9", - "is_secret": false, + "hashed_secret": "3f6480956a775dacb44e2c39aa3d4722a347f7ab", "is_verified": false, "line_number": 420, "type": "Base64 High Entropy String" }, { - "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f", - "is_secret": false, + "hashed_secret": "17f32ae55b14d708ca121722c2cae37189f19daf", "is_verified": false, "line_number": 423, "type": "Base64 High Entropy String" }, { - "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7", - "is_secret": false, + "hashed_secret": "08a74689ca077515d406093720a7e5675fb42bb8", "is_verified": false, "line_number": 424, "type": "Base64 High Entropy String" }, { - "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577", - "is_secret": false, + "hashed_secret": "fa577bb3b2600d2d522dcfea8f1e34896760fcf2", "is_verified": false, "line_number": 425, "type": "Base64 High Entropy String" }, { - "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4", - "is_secret": false, + "hashed_secret": "37254f15cca211a1bd5f7ceb23de2b3eb8fb33aa", "is_verified": false, "line_number": 426, "type": "Base64 High Entropy String" }, { - "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92", - "is_secret": false, + "hashed_secret": "86865593e038509467b91c2d5f36ccc09c3f422b", "is_verified": false, "line_number": 427, "type": "Base64 High Entropy String" }, { - "hashed_secret": 
"1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09", - "is_secret": false, + "hashed_secret": "a899a8d9e114b2a8e108f90e6a72c056db22489f", "is_verified": false, "line_number": 428, "type": "Base64 High Entropy String" }, { - "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084", - "is_secret": false, + "hashed_secret": "756b4825f886afd83c25563ac9d45f318d695c48", "is_verified": false, "line_number": 429, "type": "Base64 High Entropy String" }, { - "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a", - "is_secret": false, + "hashed_secret": "89882eeb0aca97717a7e4afcf4bc08d077813c7f", "is_verified": false, "line_number": 430, "type": "Base64 High Entropy String" }, { - "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5", - "is_secret": false, + "hashed_secret": "347140d7b7ceb4e501c3c9c2ea4f29338e2f145e", "is_verified": false, "line_number": 431, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0", - "is_secret": false, + "hashed_secret": "61dbf70eb10d609e60c7b87faf8f755ff48abc46", "is_verified": false, "line_number": 432, "type": "Base64 High Entropy String" }, { - "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3", - "is_secret": false, + "hashed_secret": "24cd54c4b2f58378bba008cb2df68ac663fba7c8", "is_verified": false, "line_number": 433, "type": "Base64 High Entropy String" }, { - "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b", - "is_secret": false, + "hashed_secret": "fa4f9626ae4b98f4b61203c5bafb6f21c9c31e5d", "is_verified": false, "line_number": 434, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708", - "is_secret": false, + "hashed_secret": "b1370003d9cc1e346c83dba33e0418c7775a0c15", "is_verified": false, "line_number": 435, "type": "Base64 High Entropy String" }, { - "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72", - "is_secret": false, + "hashed_secret": "c66526e195e423a7ba7d68ac661cdcd8600dcd1f", "is_verified": 
false, "line_number": 436, "type": "Base64 High Entropy String" }, { - "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5", - "is_secret": false, + "hashed_secret": "d29d7044f0944eb30e02cf445f6998e3343dd811", "is_verified": false, "line_number": 437, "type": "Base64 High Entropy String" }, { - "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f", - "is_secret": false, + "hashed_secret": "80a869460f33722387d8d58e7d9d2e1bbd5d1fe1", + "is_verified": false, + "line_number": 438, + "type": "Base64 High Entropy String" + }, + { + "hashed_secret": "4a06e2a02cbc665adccb4162dc57836895da65b8", "is_verified": false, "line_number": 439, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529", - "is_secret": false, + "hashed_secret": "ba2549f35835dfa101d3f660f7604dc78e3e226f", "is_verified": false, "line_number": 440, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb", - "is_secret": false, + "hashed_secret": "f354d4ee5fdb94ad29c7b3600264467f45b80eaa", "is_verified": false, "line_number": 441, "type": "Base64 High Entropy String" }, { - "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a", - "is_secret": false, + "hashed_secret": "bf17b587868ba7c3db9865b114261b5b8f1df870", "is_verified": false, "line_number": 442, "type": "Base64 High Entropy String" }, { - "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688", - "is_secret": false, + "hashed_secret": "de1fd7a0d32cba528b4d80818c6601f2588d5383", "is_verified": false, "line_number": 443, "type": "Base64 High Entropy String" }, { - "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58", - "is_secret": false, + "hashed_secret": "bcad65055f6de654541db2bf27d4e27bd54d94c7", "is_verified": false, "line_number": 444, "type": "Base64 High Entropy String" }, { - "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e", - "is_secret": false, + "hashed_secret": "f2e16f2dd532f65f79341342fdf57a093fc408d8", 
"is_verified": false, "line_number": 445, "type": "Base64 High Entropy String" }, { - "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46", - "is_secret": false, + "hashed_secret": "bb036a679a7d2df9fd2ca57068a446bf7f7dd106", "is_verified": false, "line_number": 446, "type": "Base64 High Entropy String" }, { - "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f", - "is_secret": false, + "hashed_secret": "5aa6568b1e8185578a6e964f5c322783ad349554", + "is_verified": false, + "line_number": 447, + "type": "Base64 High Entropy String" + }, + { + "hashed_secret": "4d14835ff0b0bf5aad480296cb705c74ac65f413", "is_verified": false, "line_number": 448, "type": "Base64 High Entropy String" }, { - "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829", - "is_secret": false, + "hashed_secret": "3f23f77dcf454ad73c4d61c44fd9aa584ef946c1", + "is_verified": false, + "line_number": 451, + "type": "Base64 High Entropy String" + }, + { + "hashed_secret": "1739fe5e5dfcf851b64f8b7b11538f1de29ce0b5", "is_verified": false, - "line_number": 449, + "line_number": 452, "type": "Base64 High Entropy String" }, { - "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41", - "is_secret": false, + "hashed_secret": "8129db302110714fc735e3494bd82a65690e0963", "is_verified": false, - "line_number": 450, + "line_number": 453, "type": "Base64 High Entropy String" }, { - "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b", - "is_secret": false, + "hashed_secret": "b48bfc62091164086a703115a0e68bdb09212591", "is_verified": false, - "line_number": 451, + "line_number": 454, "type": "Base64 High Entropy String" }, { - "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903", - "is_secret": false, + "hashed_secret": "a10284feaf27f84081073a3267e3dce24ca7b911", "is_verified": false, - "line_number": 452, + "line_number": 455, "type": "Base64 High Entropy String" }, { - "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02", - "is_secret": false, + "hashed_secret": 
"3fd80f31de4be8dde9d2b421e832c7d4043fd49a", "is_verified": false, - "line_number": 453, + "line_number": 456, "type": "Base64 High Entropy String" } ], "kube/services/jobs/indexd-authz-job.yaml": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, "is_verified": false, "line_number": 87, "type": "Basic Auth Credentials" @@ -1191,14 +1077,12 @@ "kube/services/monitoring/grafana-values.yaml": [ { "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", - "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", - "is_secret": false, "is_verified": false, "line_number": 166, "type": "Secret Keyword" @@ -1207,7 +1091,6 @@ "kube/services/revproxy/helpers.js": [ { "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" @@ -1216,7 +1099,6 @@ "kube/services/revproxy/helpersTest.js": [ { "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", - "is_secret": false, "is_verified": false, "line_number": 22, "type": "JSON Web Token" @@ -1225,7 +1107,6 @@ "kube/services/superset/superset-deploy.yaml": [ { "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", - "is_secret": false, "is_verified": false, "line_number": 38, "type": "Secret Keyword" @@ -1234,14 +1115,12 @@ "kube/services/superset/superset-redis.yaml": [ { "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", - "is_secret": false, "is_verified": false, "line_number": 165, "type": "Secret Keyword" }, { "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", - "is_secret": false, "is_verified": false, "line_number": 265, "type": "Secret Keyword" @@ -1250,35 +1129,30 @@ "kube/services/superset/values.yaml": [ { "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3", - "is_secret": false, "is_verified": false, "line_number": 54, "type": "Secret Keyword" }, { 
"hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" }, { "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8", - "is_secret": false, "is_verified": false, "line_number": 212, "type": "Secret Keyword" }, { "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358", - "is_secret": false, "is_verified": false, "line_number": 396, "type": "Secret Keyword" }, { "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", - "is_secret": false, "is_verified": false, "line_number": 503, "type": "Secret Keyword" @@ -1287,280 +1161,240 @@ "package-lock.json": [ { "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", - "is_secret": false, "is_verified": false, "line_number": 1481, "type": "Base64 High Entropy String" }, { "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", - "is_secret": false, "is_verified": false, "line_number": 1489, "type": "Base64 High Entropy String" }, { "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", - "is_secret": false, "is_verified": false, "line_number": 1521, "type": "Base64 High Entropy String" }, { "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", - "is_secret": false, "is_verified": false, "line_number": 1547, "type": "Base64 High Entropy String" }, { "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", - "is_secret": false, "is_verified": false, "line_number": 1611, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", - "is_secret": false, "is_verified": false, "line_number": 1640, "type": "Base64 High Entropy String" }, { "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", - "is_secret": false, "is_verified": false, "line_number": 1648, "type": "Base64 High Entropy String" }, { "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", - "is_secret": false, "is_verified": false, "line_number": 1664, "type": "Base64 High Entropy 
String" }, { "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", - "is_secret": false, "is_verified": false, "line_number": 1683, "type": "Base64 High Entropy String" }, { "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", - "is_secret": false, "is_verified": false, "line_number": 1733, "type": "Base64 High Entropy String" }, { "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", - "is_secret": false, "is_verified": false, "line_number": 1742, "type": "Base64 High Entropy String" }, { "hashed_secret": "a7af5768a6d936e36f28e1030d7f894d7aaf555e", - "is_secret": false, "is_verified": false, "line_number": 1755, "type": "Base64 High Entropy String" }, { "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", - "is_secret": false, "is_verified": false, "line_number": 1769, "type": "Base64 High Entropy String" }, { "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", - "is_secret": false, "is_verified": false, "line_number": 1855, "type": "Base64 High Entropy String" }, { "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", - "is_secret": false, "is_verified": false, "line_number": 1889, "type": "Base64 High Entropy String" }, { "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", - "is_secret": false, "is_verified": false, "line_number": 1894, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", - "is_secret": false, "is_verified": false, "line_number": 1921, "type": "Base64 High Entropy String" }, { "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", - "is_secret": false, "is_verified": false, "line_number": 1950, "type": "Base64 High Entropy String" }, { "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", - "is_secret": false, "is_verified": false, "line_number": 1983, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", - "is_secret": false, "is_verified": false, "line_number": 2004, "type": 
"Base64 High Entropy String" }, { "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", - "is_secret": false, "is_verified": false, "line_number": 2013, "type": "Base64 High Entropy String" }, { "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", - "is_secret": false, "is_verified": false, "line_number": 2046, "type": "Base64 High Entropy String" }, { "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", - "is_secret": false, "is_verified": false, "line_number": 2078, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", - "is_secret": false, "is_verified": false, "line_number": 2083, "type": "Base64 High Entropy String" }, { "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", - "is_secret": false, "is_verified": false, "line_number": 2111, "type": "Base64 High Entropy String" }, { "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", - "is_secret": false, "is_verified": false, "line_number": 2138, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", - "is_secret": false, "is_verified": false, "line_number": 2143, "type": "Base64 High Entropy String" }, { "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", - "is_secret": false, "is_verified": false, "line_number": 2150, "type": "Base64 High Entropy String" }, { "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", - "is_secret": false, "is_verified": false, "line_number": 2188, "type": "Base64 High Entropy String" }, { "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", - "is_secret": false, "is_verified": false, "line_number": 2291, "type": "Base64 High Entropy String" }, { "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", - "is_secret": false, "is_verified": false, "line_number": 2303, "type": "Base64 High Entropy String" }, { "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", - "is_secret": false, "is_verified": false, 
"line_number": 2308, "type": "Base64 High Entropy String" }, { "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", - "is_secret": false, "is_verified": false, "line_number": 2317, "type": "Base64 High Entropy String" }, { "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", - "is_secret": false, "is_verified": false, "line_number": 2327, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", - "is_secret": false, "is_verified": false, "line_number": 2332, "type": "Base64 High Entropy String" }, { "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", - "is_secret": false, "is_verified": false, "line_number": 2340, "type": "Base64 High Entropy String" }, { "hashed_secret": "eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", - "is_secret": false, "is_verified": false, "line_number": 2349, "type": "Base64 High Entropy String" }, { "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", - "is_secret": false, "is_verified": false, "line_number": 2357, "type": "Base64 High Entropy String" }, { "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", - "is_secret": false, "is_verified": false, "line_number": 2369, "type": "Base64 High Entropy String" }, { "hashed_secret": "3f0c251b9c2c21454445a98fde6915ceacde2136", - "is_secret": false, "is_verified": false, "line_number": 2387, "type": "Base64 High Entropy String" @@ -1569,7 +1403,6 @@ "tf_files/aws/cognito/README.md": [ { "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70", - "is_secret": false, "is_verified": false, "line_number": 106, "type": "Secret Keyword" @@ -1578,14 +1411,12 @@ "tf_files/aws/commons/README.md": [ { "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", - "is_secret": false, "is_verified": false, "line_number": 60, "type": "Secret Keyword" }, { "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -1594,7 +1425,6 @@ 
"tf_files/aws/eks/sample.tfvars": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 107, "type": "Hex High Entropy String" @@ -1603,7 +1433,6 @@ "tf_files/aws/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 133, "type": "Hex High Entropy String" @@ -1612,14 +1441,12 @@ "tf_files/aws/modules/common-logging/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, "line_number": 57, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Hex High Entropy String" @@ -1628,28 +1455,24 @@ "tf_files/aws/modules/common-logging/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1658,21 +1481,18 @@ "tf_files/aws/modules/common-logging/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": 
"a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" @@ -1681,7 +1501,6 @@ "tf_files/aws/modules/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 113, "type": "Hex High Entropy String" @@ -1690,14 +1509,12 @@ "tf_files/aws/modules/management-logs/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, "line_number": 54, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, "line_number": 56, "type": "Hex High Entropy String" @@ -1706,28 +1523,24 @@ "tf_files/aws/modules/management-logs/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1736,42 +1549,36 @@ "tf_files/aws/modules/management-logs/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, 
"is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Base64 High Entropy String" }, { "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" }, { "hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" @@ -1780,7 +1587,6 @@ "tf_files/aws/slurm/README.md": [ { "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", - "is_secret": false, "is_verified": false, "line_number": 83, "type": "Secret Keyword" @@ -1789,7 +1595,6 @@ "tf_files/azure/cloud.tf": [ { "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", - "is_secret": false, "is_verified": false, "line_number": 424, "type": "Secret Keyword" @@ -1798,7 +1603,6 @@ "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -1807,7 +1611,6 @@ "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -1816,7 +1619,6 @@ "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 262, "type": "Secret Keyword" @@ -1825,21 +1627,18 @@ "tf_files/gcp/commons/sample.tfvars": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 11, "type": "Secret Keyword" }, { "hashed_secret": 
"8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", - "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" @@ -1848,7 +1647,6 @@ "tf_files/shared/modules/k8s_configs/creds.tpl": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 8, "type": "Secret Keyword" diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 1da4ac2d5..730a3b36e 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,5 +1,5 @@ import argparse -import json +import copy import sys import requests import pydash From 8103e82b1044daca4290831db63653b40b24b97c Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Thu, 29 Feb 2024 12:58:32 -0800 Subject: [PATCH 301/362] Add jenkins-dcp to the env pool reset script (#2489) --- files/scripts/ci-env-pool-reset.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/files/scripts/ci-env-pool-reset.sh b/files/scripts/ci-env-pool-reset.sh index c0c1f67c6..362cfbfd5 100644 --- a/files/scripts/ci-env-pool-reset.sh +++ b/files/scripts/ci-env-pool-reset.sh @@ -29,6 +29,7 @@ source "${GEN3_HOME}/gen3/gen3setup.sh" cat - > jenkins-envs-services.txt < Date: Thu, 29 Feb 2024 15:20:15 -0800 Subject: [PATCH 302/362] (HP-572) create dynamodb table for gen3-license workspace (#2441) * (HP-572) create dynamodb table for gen3-license workspace * (HP-572) get GSI from hatchery config * (HP-572) remove extra quotes from 'GSI' * (HP-572) move distribute-license for backwards compatibility --------- Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> --- gen3/bin/kube-setup-hatchery.sh | 57 ++++++++++++++++++++++++++++++--- 1 file changed, 53 insertions(+), 4 deletions(-) diff --git 
a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index 691fb354a..5454d1e24 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -20,11 +20,60 @@ gen3 jupyter j-namespace setup # (g3k_kv_filter ${GEN3_HOME}/kube/services/hatchery/serviceaccount.yaml BINDING_ONE "name: hatchery-binding1-$namespace" BINDING_TWO "name: hatchery-binding2-$namespace" CURRENT_NAMESPACE "namespace: $namespace" | g3kubectl apply -f -) || true +function exists_or_create_gen3_license_table() { + # Create dynamodb table for gen3-license if it does not exist. + TARGET_TABLE="$1" + echo "Checking for dynamoDB table: ${TARGET_TABLE}" -# cron job to distribute licenses if using Stata workspaces -if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ]; -then - gen3 job cron distribute-licenses '* * * * *' + FOUND_TABLE=`aws dynamodb list-tables | jq -r .TableNames | jq -c -r '.[]' | grep $TARGET_TABLE` + if [ -n "$FOUND_TABLE" ]; then + echo "Target table already exists in dynamoDB: $FOUND_TABLE" + else + echo "Creating table ${TARGET_TABLE}" + GSI=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-global-secondary-index"'` + if [[ -z "$GSI" || "$GSI" == "null" ]]; then + echo "Error: No global-secondary-index in configuration" + return 0 + fi + aws dynamodb create-table \ + --no-cli-pager \ + --table-name "$TARGET_TABLE" \ + --attribute-definitions AttributeName=itemId,AttributeType=S \ + AttributeName=environment,AttributeType=S \ + AttributeName=isActive,AttributeType=S \ + --key-schema AttributeName=itemId,KeyType=HASH \ + AttributeName=environment,KeyType=RANGE \ + --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 \ + --global-secondary-indexes \ + "[ + { + \"IndexName\": \"$GSI\", + \"KeySchema\": [{\"AttributeName\":\"environment\",\"KeyType\":\"HASH\"}, + {\"AttributeName\":\"isActive\",\"KeyType\":\"RANGE\"}], + \"Projection\":{ + 
\"ProjectionType\":\"INCLUDE\", + \"NonKeyAttributes\":[\"itemId\",\"userId\",\"licenseId\",\"licenseType\"] + }, + \"ProvisionedThroughput\": { + \"ReadCapacityUnits\": 5, + \"WriteCapacityUnits\": 3 + } + } + ]" + fi +} + +TARGET_TABLE=`g3kubectl get configmaps/manifest-hatchery -o json | jq -r '.data."license-user-maps-dynamodb-table"'` +if [[ -z "$TARGET_TABLE" || "$TARGET_TABLE" == "null" ]]; then + echo "No gen3-license table in configuration" + # cron job to distribute licenses if using Stata workspaces but not using dynamoDB + if [ "$(g3kubectl get configmaps/manifest-hatchery -o yaml | grep "\"image\": .*stata.*")" ]; + then + gen3 job cron distribute-licenses '* * * * *' + fi +else + echo "Found gen3-license table in configuration: $TARGET_TABLE" + exists_or_create_gen3_license_table "$TARGET_TABLE" fi policy=$( cat < Date: Fri, 1 Mar 2024 15:46:10 -0600 Subject: [PATCH 303/362] Fix/revert (#2493) * Revert "fix missed import (#2491)" This reverts commit bd6bc767c0461f8fc1f1d90fa8a755be1e7fc381. * Revert "Update heal-cedar-data-ingest.py (#2490)" This reverts commit 90b66091c0d1d8b89e7182ad84201e99903d8e44. * Revert "updating the cedar data ingest (#2472)" This reverts commit 6c27fc9a7fe6cd3affd7a108db3c80b51c664ad1. 
--- .secrets.baseline | 346 ++++++++++++++---- .../healdata/heal-cedar-data-ingest.py | 98 +---- 2 files changed, 275 insertions(+), 169 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index 2583e269f..b7e06622d 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -1,9 +1,9 @@ { "exclude": { - "files": null, + "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2024-02-29T19:38:46Z", + "generated_at": "2024-02-23T20:30:41Z" "plugins_used": [ { "name": "AWSKeyDetector" @@ -61,12 +61,14 @@ "Chef/repo/data_bags/README.md": [ { "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", + "is_secret": false, "is_verified": false, "line_number": 45, "type": "Secret Keyword" }, { "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", + "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -75,22 +77,25 @@ "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, - "line_number": 124, + "line_number": 121, "type": "Secret Keyword" } ], "Docker/jenkins/Jenkins-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, - "line_number": 139, + "line_number": 143, "type": "Secret Keyword" } ], "Docker/jenkins/Jenkins/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 107, "type": "Secret Keyword" @@ -99,6 +104,7 @@ "Docker/jenkins/Jenkins2/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "is_secret": false, "is_verified": false, "line_number": 108, "type": "Secret Keyword" @@ -107,6 +113,7 @@ "Docker/sidecar/service.key": [ { "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", + "is_secret": false, "is_verified": false, "line_number": 1, "type": "Private Key" @@ -115,6 +122,7 @@ 
"Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -123,6 +131,7 @@ "ansible/roles/awslogs/defaults/main.yaml": [ { "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Basic Auth Credentials" @@ -131,12 +140,14 @@ "ansible/roles/slurm/README.md": [ { "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", + "is_secret": false, "is_verified": false, "line_number": 86, "type": "Base64 High Entropy String" }, { "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", + "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" @@ -145,6 +156,7 @@ "apis_configs/config_helper.py": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", + "is_secret": false, "is_verified": false, "line_number": 66, "type": "Basic Auth Credentials" @@ -153,6 +165,7 @@ "apis_configs/fence_credentials.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -161,18 +174,21 @@ "apis_configs/fence_settings.py": [ { "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Basic Auth Credentials" }, { "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", + "is_secret": false, "is_verified": false, "line_number": 58, "type": "Secret Keyword" }, { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "is_secret": false, "is_verified": false, "line_number": 80, "type": "Basic Auth Credentials" @@ -181,6 +197,7 @@ "apis_configs/indexd_settings.py": [ { "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", + "is_secret": false, "is_verified": false, "line_number": 59, "type": "Basic Auth Credentials" @@ -189,6 +206,7 @@ 
"apis_configs/peregrine_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -197,6 +215,7 @@ "apis_configs/sheepdog_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -205,6 +224,7 @@ "doc/Gen3-data-upload.md": [ { "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", + "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -213,6 +233,7 @@ "doc/api.md": [ { "hashed_secret": "625de83a7517422051911680cc803921ff99db90", + "is_secret": false, "is_verified": false, "line_number": 47, "type": "Hex High Entropy String" @@ -221,24 +242,28 @@ "doc/gen3OnK8s.md": [ { "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", + "is_secret": false, "is_verified": false, "line_number": 113, "type": "Secret Keyword" }, { "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", + "is_secret": false, "is_verified": false, "line_number": 143, "type": "Secret Keyword" }, { "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", + "is_secret": false, "is_verified": false, "line_number": 170, "type": "Secret Keyword" }, { "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", + "is_secret": false, "is_verified": false, "line_number": 189, "type": "Secret Keyword" @@ -247,6 +272,7 @@ "doc/kube-setup-data-ingestion-job.md": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Secret Keyword" @@ -255,6 +281,7 @@ "doc/logs.md": [ { "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Secret Keyword" @@ -263,6 +290,7 @@ "doc/slurm_cluster.md": [ { "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", + "is_secret": 
false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" @@ -271,12 +299,14 @@ "files/dashboard/usage-reports/package-lock.json": [ { "hashed_secret": "e095101882f706c4de95e0f75c5bcb9666e3f448", + "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "5422e4f96964d5739998b25ac214520c1b113e5b", + "is_secret": false, "is_verified": false, "line_number": 15, "type": "Base64 High Entropy String" @@ -285,12 +315,14 @@ "gen3/bin/api.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 407, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "is_secret": false, "is_verified": false, "line_number": 477, "type": "Secret Keyword" @@ -299,6 +331,7 @@ "gen3/bin/kube-dev-namespace.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 135, "type": "Secret Keyword" @@ -307,6 +340,7 @@ "gen3/bin/kube-setup-argo.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 206, "type": "Secret Keyword" @@ -315,6 +349,7 @@ "gen3/bin/kube-setup-aurora-monitoring.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 59, "type": "Secret Keyword" @@ -323,6 +358,7 @@ "gen3/bin/kube-setup-certs.sh": [ { "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", + "is_secret": false, "is_verified": false, "line_number": 50, "type": "Secret Keyword" @@ -331,12 +367,14 @@ "gen3/bin/kube-setup-dashboard.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 40, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "is_secret": false, "is_verified": false, "line_number": 
41, "type": "Secret Keyword" @@ -345,12 +383,14 @@ "gen3/bin/kube-setup-data-ingestion-job.sh": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" }, { "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", + "is_secret": false, "is_verified": false, "line_number": 102, "type": "Secret Keyword" @@ -359,6 +399,7 @@ "gen3/bin/kube-setup-dicom-server.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 43, "type": "Secret Keyword" @@ -367,6 +408,7 @@ "gen3/bin/kube-setup-dicom.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -375,26 +417,14 @@ "gen3/bin/kube-setup-jenkins.sh": [ { "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_verified": false, - "line_number": 22, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-jenkins2.sh": [ - { - "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", - "is_verified": false, - "line_number": 18, - "type": "Secret Keyword" - }, - { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 22, "type": "Secret Keyword" @@ -403,6 +433,7 @@ "gen3/bin/kube-setup-metadata.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 35, "type": "Secret Keyword" @@ -411,18 +442,21 @@ "gen3/bin/kube-setup-revproxy.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 38, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", 
+ "is_secret": false, "is_verified": false, "line_number": 55, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 57, "type": "Secret Keyword" @@ -431,18 +465,21 @@ "gen3/bin/kube-setup-secrets.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 79, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 82, "type": "Secret Keyword" }, { "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", + "is_secret": false, "is_verified": false, "line_number": 95, "type": "Secret Keyword" @@ -451,12 +488,14 @@ "gen3/bin/kube-setup-sftp.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 36, "type": "Secret Keyword" }, { "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", + "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -465,6 +504,7 @@ "gen3/bin/kube-setup-sheepdog.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 33, "type": "Secret Keyword" @@ -473,24 +513,28 @@ "gen3/bin/kube-setup-sower-jobs.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 120, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 122, "type": "Secret 
Keyword" @@ -499,18 +543,21 @@ "gen3/bin/kube-setup-ssjdispatcher.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 117, "type": "Secret Keyword" }, { "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93", + "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 197, "type": "Secret Keyword" @@ -519,12 +566,14 @@ "gen3/lib/aws.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "is_secret": false, "is_verified": false, "line_number": 640, "type": "Secret Keyword" }, { "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", + "is_secret": false, "is_verified": false, "line_number": 660, "type": "Secret Keyword" @@ -533,12 +582,14 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", + "is_secret": false, "is_verified": false, "line_number": 33, "type": "Basic Auth Credentials" }, { "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", + "is_secret": false, "is_verified": false, "line_number": 286, "type": "Secret Keyword" @@ -547,6 +598,7 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -555,6 +607,7 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -563,6 +616,7 @@ "gen3/lib/logs/utils.sh": [ { "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", + "is_secret": false, "is_verified": false, "line_number": 3, "type": "Secret Keyword" @@ -571,6 +625,7 @@ 
"gen3/lib/manifestDefaults/hatchery/hatchery.json": [ { "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", + "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -579,12 +634,14 @@ "gen3/lib/onprem.sh": [ { "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", + "is_secret": false, "is_verified": false, "line_number": 68, "type": "Secret Keyword" }, { "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", + "is_secret": false, "is_verified": false, "line_number": 84, "type": "Secret Keyword" @@ -593,12 +650,14 @@ "gen3/lib/secrets/rotate-postgres.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "is_secret": false, "is_verified": false, "line_number": 250, "type": "Secret Keyword" @@ -607,42 +666,49 @@ "gen3/lib/testData/etlconvert/expected2.yaml": [ { "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_secret": false, "is_verified": false, "line_number": 13, "type": "Base64 High Entropy String" }, { "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_secret": false, "is_verified": false, "line_number": 16, "type": "Base64 High Entropy String" }, { "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_secret": false, "is_verified": false, "line_number": 33, "type": "Base64 High Entropy String" }, { "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", + "is_secret": false, "is_verified": false, "line_number": 35, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", + "is_secret": false, "is_verified": false, "line_number": 36, "type": "Base64 High Entropy String" @@ -651,6 +717,7 @@ "gen3/test/secretsTest.sh": [ { "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", + "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" @@ -659,24 +726,28 @@ "gen3/test/terraformTest.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "is_secret": false, "is_verified": false, "line_number": 156, "type": "Secret Keyword" }, { "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "is_secret": false, "is_verified": false, "line_number": 172, "type": "Base64 High Entropy String" }, { "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_secret": false, "is_verified": false, "line_number": 175, "type": "Base64 High Entropy String" }, { "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", + "is_secret": false, "is_verified": false, "line_number": 175, "type": "Secret Keyword" @@ -685,18 +756,21 @@ "kube/services/argocd/values.yaml": [ { "hashed_secret": "27c6929aef41ae2bcadac15ca6abcaff72cda9cd", + "is_secret": false, "is_verified": false, "line_number": 360, "type": "Private Key" }, { "hashed_secret": "edbd5e119f94badb9f99a67ac6ff4c7a5204ad61", + "is_secret": false, "is_verified": false, "line_number": 379, "type": "Secret Keyword" }, { "hashed_secret": "91dfd9ddb4198affc5c194cd8ce6d338fde470e2", + "is_secret": false, "is_verified": false, "line_number": 412, "type": "Secret Keyword" @@ -705,6 +779,7 @@ "kube/services/datadog/values.yaml": [ { "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", + "is_secret": false, "is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -713,362 +788,401 @@ "kube/services/fenceshib/fenceshib-configmap.yaml": [ { "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", + "is_secret": false, "is_verified": false, "line_number": 375, "type": 
"Base64 High Entropy String" }, { "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", + "is_secret": false, "is_verified": false, "line_number": 376, "type": "Base64 High Entropy String" }, { "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", + "is_secret": false, "is_verified": false, "line_number": 377, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", + "is_secret": false, "is_verified": false, "line_number": 378, "type": "Base64 High Entropy String" }, { "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", + "is_secret": false, "is_verified": false, "line_number": 379, "type": "Base64 High Entropy String" }, { "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", + "is_secret": false, "is_verified": false, "line_number": 380, "type": "Base64 High Entropy String" }, { "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12", + "is_secret": false, "is_verified": false, "line_number": 381, "type": "Base64 High Entropy String" }, { "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", + "is_secret": false, "is_verified": false, "line_number": 382, "type": "Base64 High Entropy String" }, { "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", + "is_secret": false, "is_verified": false, "line_number": 383, "type": "Base64 High Entropy String" }, { "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3", + "is_secret": false, "is_verified": false, "line_number": 384, "type": "Base64 High Entropy String" }, { "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", + "is_secret": false, "is_verified": false, "line_number": 385, "type": "Base64 High Entropy String" }, { "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52", + "is_secret": false, "is_verified": false, "line_number": 386, "type": "Base64 High Entropy String" }, { "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", + "is_secret": false, "is_verified": false, "line_number": 387, 
"type": "Base64 High Entropy String" }, { "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", + "is_secret": false, "is_verified": false, "line_number": 388, "type": "Base64 High Entropy String" }, { "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", + "is_secret": false, "is_verified": false, "line_number": 389, "type": "Base64 High Entropy String" }, { "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", + "is_secret": false, "is_verified": false, "line_number": 390, "type": "Base64 High Entropy String" }, { "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", + "is_secret": false, "is_verified": false, "line_number": 391, "type": "Base64 High Entropy String" }, { "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", + "is_secret": false, "is_verified": false, "line_number": 392, "type": "Base64 High Entropy String" }, { "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", + "is_secret": false, "is_verified": false, "line_number": 393, "type": "Base64 High Entropy String" }, { "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", + "is_secret": false, "is_verified": false, "line_number": 394, "type": "Base64 High Entropy String" }, { "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", + "is_secret": false, "is_verified": false, "line_number": 395, "type": "Base64 High Entropy String" }, { "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", + "is_secret": false, "is_verified": false, "line_number": 396, "type": "Base64 High Entropy String" }, { "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", + "is_secret": false, "is_verified": false, "line_number": 397, "type": "Base64 High Entropy String" }, { "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", + "is_secret": false, "is_verified": false, "line_number": 398, "type": "Base64 High Entropy String" }, { "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", + "is_secret": false, "is_verified": false, "line_number": 
399, "type": "Base64 High Entropy String" }, { - "hashed_secret": "fbad0bc8f7792b03f89cd3780eb7cf79f284c525", + "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165", + "is_secret": false, "is_verified": false, "line_number": 419, "type": "Base64 High Entropy String" }, { - "hashed_secret": "3f6480956a775dacb44e2c39aa3d4722a347f7ab", + "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9", + "is_secret": false, "is_verified": false, "line_number": 420, "type": "Base64 High Entropy String" }, { - "hashed_secret": "17f32ae55b14d708ca121722c2cae37189f19daf", + "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f", + "is_secret": false, "is_verified": false, "line_number": 423, "type": "Base64 High Entropy String" }, { - "hashed_secret": "08a74689ca077515d406093720a7e5675fb42bb8", + "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7", + "is_secret": false, "is_verified": false, "line_number": 424, "type": "Base64 High Entropy String" }, { - "hashed_secret": "fa577bb3b2600d2d522dcfea8f1e34896760fcf2", + "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577", + "is_secret": false, "is_verified": false, "line_number": 425, "type": "Base64 High Entropy String" }, { - "hashed_secret": "37254f15cca211a1bd5f7ceb23de2b3eb8fb33aa", + "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4", + "is_secret": false, "is_verified": false, "line_number": 426, "type": "Base64 High Entropy String" }, { - "hashed_secret": "86865593e038509467b91c2d5f36ccc09c3f422b", + "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92", + "is_secret": false, "is_verified": false, "line_number": 427, "type": "Base64 High Entropy String" }, { - "hashed_secret": "a899a8d9e114b2a8e108f90e6a72c056db22489f", + "hashed_secret": "1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09", + "is_secret": false, "is_verified": false, "line_number": 428, "type": "Base64 High Entropy String" }, { - "hashed_secret": "756b4825f886afd83c25563ac9d45f318d695c48", + "hashed_secret": 
"5b688158be36e8b3f265a462ed599dcf69290084", + "is_secret": false, "is_verified": false, "line_number": 429, "type": "Base64 High Entropy String" }, { - "hashed_secret": "89882eeb0aca97717a7e4afcf4bc08d077813c7f", + "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a", + "is_secret": false, "is_verified": false, "line_number": 430, "type": "Base64 High Entropy String" }, { - "hashed_secret": "347140d7b7ceb4e501c3c9c2ea4f29338e2f145e", + "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5", + "is_secret": false, "is_verified": false, "line_number": 431, "type": "Base64 High Entropy String" }, { - "hashed_secret": "61dbf70eb10d609e60c7b87faf8f755ff48abc46", + "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0", + "is_secret": false, "is_verified": false, "line_number": 432, "type": "Base64 High Entropy String" }, { - "hashed_secret": "24cd54c4b2f58378bba008cb2df68ac663fba7c8", + "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3", + "is_secret": false, "is_verified": false, "line_number": 433, "type": "Base64 High Entropy String" }, { - "hashed_secret": "fa4f9626ae4b98f4b61203c5bafb6f21c9c31e5d", + "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b", + "is_secret": false, "is_verified": false, "line_number": 434, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b1370003d9cc1e346c83dba33e0418c7775a0c15", + "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708", + "is_secret": false, "is_verified": false, "line_number": 435, "type": "Base64 High Entropy String" }, { - "hashed_secret": "c66526e195e423a7ba7d68ac661cdcd8600dcd1f", + "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72", + "is_secret": false, "is_verified": false, "line_number": 436, "type": "Base64 High Entropy String" }, { - "hashed_secret": "d29d7044f0944eb30e02cf445f6998e3343dd811", + "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5", + "is_secret": false, "is_verified": false, "line_number": 437, "type": "Base64 High Entropy 
String" }, { - "hashed_secret": "80a869460f33722387d8d58e7d9d2e1bbd5d1fe1", - "is_verified": false, - "line_number": 438, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "4a06e2a02cbc665adccb4162dc57836895da65b8", + "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f", + "is_secret": false, "is_verified": false, "line_number": 439, "type": "Base64 High Entropy String" }, { - "hashed_secret": "ba2549f35835dfa101d3f660f7604dc78e3e226f", + "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529", + "is_secret": false, "is_verified": false, "line_number": 440, "type": "Base64 High Entropy String" }, { - "hashed_secret": "f354d4ee5fdb94ad29c7b3600264467f45b80eaa", + "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb", + "is_secret": false, "is_verified": false, "line_number": 441, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bf17b587868ba7c3db9865b114261b5b8f1df870", + "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a", + "is_secret": false, "is_verified": false, "line_number": 442, "type": "Base64 High Entropy String" }, { - "hashed_secret": "de1fd7a0d32cba528b4d80818c6601f2588d5383", + "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688", + "is_secret": false, "is_verified": false, "line_number": 443, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bcad65055f6de654541db2bf27d4e27bd54d94c7", + "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58", + "is_secret": false, "is_verified": false, "line_number": 444, "type": "Base64 High Entropy String" }, { - "hashed_secret": "f2e16f2dd532f65f79341342fdf57a093fc408d8", + "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e", + "is_secret": false, "is_verified": false, "line_number": 445, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bb036a679a7d2df9fd2ca57068a446bf7f7dd106", + "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46", + "is_secret": false, "is_verified": false, "line_number": 446, "type": "Base64 
High Entropy String" }, { - "hashed_secret": "5aa6568b1e8185578a6e964f5c322783ad349554", - "is_verified": false, - "line_number": 447, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "4d14835ff0b0bf5aad480296cb705c74ac65f413", + "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f", + "is_secret": false, "is_verified": false, "line_number": 448, "type": "Base64 High Entropy String" }, { - "hashed_secret": "3f23f77dcf454ad73c4d61c44fd9aa584ef946c1", - "is_verified": false, - "line_number": 451, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "1739fe5e5dfcf851b64f8b7b11538f1de29ce0b5", + "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829", + "is_secret": false, "is_verified": false, - "line_number": 452, + "line_number": 449, "type": "Base64 High Entropy String" }, { - "hashed_secret": "8129db302110714fc735e3494bd82a65690e0963", + "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41", + "is_secret": false, "is_verified": false, - "line_number": 453, + "line_number": 450, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b48bfc62091164086a703115a0e68bdb09212591", + "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b", + "is_secret": false, "is_verified": false, - "line_number": 454, + "line_number": 451, "type": "Base64 High Entropy String" }, { - "hashed_secret": "a10284feaf27f84081073a3267e3dce24ca7b911", + "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903", + "is_secret": false, "is_verified": false, - "line_number": 455, + "line_number": 452, "type": "Base64 High Entropy String" }, { - "hashed_secret": "3fd80f31de4be8dde9d2b421e832c7d4043fd49a", + "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02", + "is_secret": false, "is_verified": false, - "line_number": 456, + "line_number": 453, "type": "Base64 High Entropy String" } ], "kube/services/jobs/indexd-authz-job.yaml": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", + "is_secret": false, "is_verified": 
false, "line_number": 87, "type": "Basic Auth Credentials" @@ -1077,12 +1191,14 @@ "kube/services/monitoring/grafana-values.yaml": [ { "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", + "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", + "is_secret": false, "is_verified": false, "line_number": 166, "type": "Secret Keyword" @@ -1091,6 +1207,7 @@ "kube/services/revproxy/helpers.js": [ { "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", + "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" @@ -1099,6 +1216,7 @@ "kube/services/revproxy/helpersTest.js": [ { "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", + "is_secret": false, "is_verified": false, "line_number": 22, "type": "JSON Web Token" @@ -1107,6 +1225,7 @@ "kube/services/superset/superset-deploy.yaml": [ { "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", + "is_secret": false, "is_verified": false, "line_number": 38, "type": "Secret Keyword" @@ -1115,12 +1234,14 @@ "kube/services/superset/superset-redis.yaml": [ { "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", + "is_secret": false, "is_verified": false, "line_number": 165, "type": "Secret Keyword" }, { "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", + "is_secret": false, "is_verified": false, "line_number": 265, "type": "Secret Keyword" @@ -1129,30 +1250,35 @@ "kube/services/superset/values.yaml": [ { "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3", + "is_secret": false, "is_verified": false, "line_number": 54, "type": "Secret Keyword" }, { "hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46", + "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" }, { "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8", + "is_secret": false, "is_verified": false, "line_number": 212, "type": "Secret 
Keyword" }, { "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358", + "is_secret": false, "is_verified": false, "line_number": 396, "type": "Secret Keyword" }, { "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", + "is_secret": false, "is_verified": false, "line_number": 503, "type": "Secret Keyword" @@ -1161,240 +1287,280 @@ "package-lock.json": [ { "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", + "is_secret": false, "is_verified": false, "line_number": 1481, "type": "Base64 High Entropy String" }, { "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", + "is_secret": false, "is_verified": false, "line_number": 1489, "type": "Base64 High Entropy String" }, { "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", + "is_secret": false, "is_verified": false, "line_number": 1521, "type": "Base64 High Entropy String" }, { "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", + "is_secret": false, "is_verified": false, "line_number": 1547, "type": "Base64 High Entropy String" }, { "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", + "is_secret": false, "is_verified": false, "line_number": 1611, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", + "is_secret": false, "is_verified": false, "line_number": 1640, "type": "Base64 High Entropy String" }, { "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", + "is_secret": false, "is_verified": false, "line_number": 1648, "type": "Base64 High Entropy String" }, { "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", + "is_secret": false, "is_verified": false, "line_number": 1664, "type": "Base64 High Entropy String" }, { "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", + "is_secret": false, "is_verified": false, "line_number": 1683, "type": "Base64 High Entropy String" }, { "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", + "is_secret": false, "is_verified": false, 
"line_number": 1733, "type": "Base64 High Entropy String" }, { "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", + "is_secret": false, "is_verified": false, "line_number": 1742, "type": "Base64 High Entropy String" }, { "hashed_secret": "a7af5768a6d936e36f28e1030d7f894d7aaf555e", + "is_secret": false, "is_verified": false, "line_number": 1755, "type": "Base64 High Entropy String" }, { "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", + "is_secret": false, "is_verified": false, "line_number": 1769, "type": "Base64 High Entropy String" }, { "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", + "is_secret": false, "is_verified": false, "line_number": 1855, "type": "Base64 High Entropy String" }, { "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", + "is_secret": false, "is_verified": false, "line_number": 1889, "type": "Base64 High Entropy String" }, { "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", + "is_secret": false, "is_verified": false, "line_number": 1894, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", + "is_secret": false, "is_verified": false, "line_number": 1921, "type": "Base64 High Entropy String" }, { "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", + "is_secret": false, "is_verified": false, "line_number": 1950, "type": "Base64 High Entropy String" }, { "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", + "is_secret": false, "is_verified": false, "line_number": 1983, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", + "is_secret": false, "is_verified": false, "line_number": 2004, "type": "Base64 High Entropy String" }, { "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", + "is_secret": false, "is_verified": false, "line_number": 2013, "type": "Base64 High Entropy String" }, { "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", + "is_secret": false, 
"is_verified": false, "line_number": 2046, "type": "Base64 High Entropy String" }, { "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", + "is_secret": false, "is_verified": false, "line_number": 2078, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", + "is_secret": false, "is_verified": false, "line_number": 2083, "type": "Base64 High Entropy String" }, { "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", + "is_secret": false, "is_verified": false, "line_number": 2111, "type": "Base64 High Entropy String" }, { "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", + "is_secret": false, "is_verified": false, "line_number": 2138, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", + "is_secret": false, "is_verified": false, "line_number": 2143, "type": "Base64 High Entropy String" }, { "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", + "is_secret": false, "is_verified": false, "line_number": 2150, "type": "Base64 High Entropy String" }, { "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", + "is_secret": false, "is_verified": false, "line_number": 2188, "type": "Base64 High Entropy String" }, { "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", + "is_secret": false, "is_verified": false, "line_number": 2291, "type": "Base64 High Entropy String" }, { "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", + "is_secret": false, "is_verified": false, "line_number": 2303, "type": "Base64 High Entropy String" }, { "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", + "is_secret": false, "is_verified": false, "line_number": 2308, "type": "Base64 High Entropy String" }, { "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", + "is_secret": false, "is_verified": false, "line_number": 2317, "type": "Base64 High Entropy String" }, { "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", + 
"is_secret": false, "is_verified": false, "line_number": 2327, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", + "is_secret": false, "is_verified": false, "line_number": 2332, "type": "Base64 High Entropy String" }, { "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", + "is_secret": false, "is_verified": false, "line_number": 2340, "type": "Base64 High Entropy String" }, { "hashed_secret": "eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", + "is_secret": false, "is_verified": false, "line_number": 2349, "type": "Base64 High Entropy String" }, { "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", + "is_secret": false, "is_verified": false, "line_number": 2357, "type": "Base64 High Entropy String" }, { "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", + "is_secret": false, "is_verified": false, "line_number": 2369, "type": "Base64 High Entropy String" }, { "hashed_secret": "3f0c251b9c2c21454445a98fde6915ceacde2136", + "is_secret": false, "is_verified": false, "line_number": 2387, "type": "Base64 High Entropy String" @@ -1403,6 +1569,7 @@ "tf_files/aws/cognito/README.md": [ { "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70", + "is_secret": false, "is_verified": false, "line_number": 106, "type": "Secret Keyword" @@ -1411,12 +1578,14 @@ "tf_files/aws/commons/README.md": [ { "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", + "is_secret": false, "is_verified": false, "line_number": 60, "type": "Secret Keyword" }, { "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", + "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -1425,6 +1594,7 @@ "tf_files/aws/eks/sample.tfvars": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", + "is_secret": false, "is_verified": false, "line_number": 107, "type": "Hex High Entropy String" @@ -1433,6 +1603,7 @@ "tf_files/aws/eks/variables.tf": [ { "hashed_secret": 
"83c1003f406f34fba4d6279a948fee3abc802884", + "is_secret": false, "is_verified": false, "line_number": 133, "type": "Hex High Entropy String" @@ -1441,12 +1612,14 @@ "tf_files/aws/modules/common-logging/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", + "is_secret": false, "is_verified": false, "line_number": 57, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", + "is_secret": false, "is_verified": false, "line_number": 59, "type": "Hex High Entropy String" @@ -1455,24 +1628,28 @@ "tf_files/aws/modules/common-logging/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1481,18 +1658,21 @@ "tf_files/aws/modules/common-logging/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" @@ -1501,6 +1681,7 @@ "tf_files/aws/modules/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", + "is_secret": false, 
"is_verified": false, "line_number": 113, "type": "Hex High Entropy String" @@ -1509,12 +1690,14 @@ "tf_files/aws/modules/management-logs/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", + "is_secret": false, "is_verified": false, "line_number": 54, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", + "is_secret": false, "is_verified": false, "line_number": 56, "type": "Hex High Entropy String" @@ -1523,24 +1706,28 @@ "tf_files/aws/modules/management-logs/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", + "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1549,36 +1736,42 @@ "tf_files/aws/modules/management-logs/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Base64 High Entropy String" }, { "hashed_secret": 
"51118900cd675df1b44f254057398f3e52902a5d", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" }, { "hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", + "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" @@ -1587,6 +1780,7 @@ "tf_files/aws/slurm/README.md": [ { "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", + "is_secret": false, "is_verified": false, "line_number": 83, "type": "Secret Keyword" @@ -1595,6 +1789,7 @@ "tf_files/azure/cloud.tf": [ { "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", + "is_secret": false, "is_verified": false, "line_number": 424, "type": "Secret Keyword" @@ -1603,6 +1798,7 @@ "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", + "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -1611,6 +1807,7 @@ "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", + "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -1619,6 +1816,7 @@ "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", + "is_secret": false, "is_verified": false, "line_number": 262, "type": "Secret Keyword" @@ -1627,18 +1825,21 @@ "tf_files/gcp/commons/sample.tfvars": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_secret": false, "is_verified": false, "line_number": 11, "type": "Secret Keyword" }, { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", + "is_secret": false, "is_verified": false, "line_number": 37, 
"type": "Secret Keyword" @@ -1647,6 +1848,7 @@ "tf_files/shared/modules/k8s_configs/creds.tpl": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "is_secret": false, "is_verified": false, "line_number": 8, "type": "Secret Keyword" diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 730a3b36e..71575e3c5 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,5 +1,5 @@ import argparse -import copy +import json import sys import requests import pydash @@ -35,16 +35,6 @@ "Buisness Development": "Business Development" } -# repository links -REPOSITORY_STUDY_ID_LINK_TEMPLATE = { - "NIDDK Central": "https://repository.niddk.nih.gov/studies//", - "NIDA Data Share": "https://datashare.nida.nih.gov/study/", - "NICHD DASH": "https://dash.nichd.nih.gov/study/", - "ICPSR": "https://www.icpsr.umich.edu/web/ICPSR/studies/", - "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy//", -} - - # Defines field that we don't want to include in the filters OMITTED_VALUES_MAPPING = { "study_metadata.human_subject_applicability.gender_applicability": "Not applicable" @@ -124,31 +114,6 @@ def get_client_token(client_id: str, client_secret: str): return token -def get_related_studies(serial_num, hostname): - related_study_result = [] - - if serial_num: - mds = requests.get(f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000") - if mds.status_code == 200: - related_study_metadata = mds.json() - - for ( - related_study_metadata_key, - related_study_metadata_value, - ) in related_study_metadata.items(): - title = ( - related_study_metadata_value.get( - "gen3_discovery", {} - ) - .get("study_metadata", {}) - .get("minimal_info", {}) - .get("study_name", "") - ) - link = f"https://{hostname}/portal/discovery/{related_study_metadata_key}/" - related_study_result.append({"title": 
title, "link": link}) - return related_study_result - - parser = argparse.ArgumentParser() parser.add_argument("--directory", help="CEDAR Directory ID for registering ") @@ -249,67 +214,6 @@ def get_related_studies(serial_num, hostname): mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) mds_res["gen3_discovery"]["study_metadata"]["metadata_location"]["other_study_websites"] = cedar_record_other_study_websites - # setup citations - doi_citation = mds_res["gen3_discovery"]["study_metadata"].get("doi_citation", "") - mds_res["gen3_discovery"]["study_metadata"]["citation"]["heal_platform_citation"] = doi_citation - - - # setup repository_study_link - data_repositories = ( - mds_res.get("study_metadata", {}) - .get("metadata_location", {}) - .get("data_repositories", []) - ) - repository_citation = "Users must also include a citation to the data as specified by the local repository." - repository_citation_additional_text = ' The link to the study page at the local repository can be found in the "Data" tab.' 
- for repository in data_repositories: - if ( - repository["repository_name"] - and repository["repository_name"] - in REPOSITORY_STUDY_ID_LINK_TEMPLATE - and repository["repository_study_ID"] - ): - repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[ - repository["repository_name"] - ].replace("", repository["repository_study_ID"]) - repository.update({"repository_study_link": repository_study_link}) - if repository_citation_additional_text not in repository_citation: - repository_citation += repository_citation_additional_text - if len(data_repositories): - data_repositories[0] = { - **data_repositories[0], - "repository_citation": repository_citation, - } - mds_res["gen3_discovery"]["study_metadata"][ - "metadata_location" - ]["data_repositories"] = data_repositories - - - - # set up related studies - serial_num = None - try: - serial_num = ( - mds_res - .get("nih_reporter", {}) - .get("project_num_split", {}) - .get("serial_num", None) - ) - except Exception: - print(f"Unable to get serial number for study") - - if serial_num == None: - print(f"Unable to get serial number for study") - - related_study_result = get_related_studies(serial_num, hostname) - existing_related_study_result = mds_res.get("related_studies", []) - for related_study in related_study_result: - if related_study not in existing_related_study_result: - existing_related_study_result.append(copy.deepcopy(related_study)) - mds_res["gen3_discovery"][ - "related_studies" - ] = copy.deepcopy(existing_related_study_result) - # merge data from cedar that is not study level metadata into a level higher deleted_keys = [] for key, value in mds_res["gen3_discovery"]["study_metadata"].items(): From 6ee728ad3e375b1a0e521ae256ef13ca29529067 Mon Sep 17 00:00:00 2001 From: Alexander VanTol Date: Mon, 4 Mar 2024 11:04:31 -0600 Subject: [PATCH 304/362] Automation for Gen3 Discovery AI Service (#2396) * feat(gen3-openai): rough initial testing, no automation for rolling * feat(gen3-discovery-ai): initial 
deployment * fix(ai): fix setup jq escaping * fix(ai): fix file name * fix(ai): fix deployment configmap * fix(ai): fix configmap * fix(ai): env path * feat(image): use from manifest * chore(ai): better naming * fix(ai): fix mount path for cfg * fix(ai): first attempt to fix issue of needing write volume for chromadb persistance * fix(ai): k8s deploy command * fix(ai): fix duplicate name * chore(ai): don't sent telemetry data * chore(ai): more logging in init * chore(logs): more * fix(ai): mv instead of cp * fix(ai): back to cp, can't mv b/c of readonly * feat(ai): use s3 and service account + role to handle persisted vectorstore data instead of configmap * fix(ai): fix setup * fix(ai): fix setup * fix(ai): fix automation * fix(ai): automation * fix(ai): fix logic for setup * fix(ai): mount storage config and don't use gen3/jq since they're not available * fix(ai): fix wrong path * fix(ai): quotes * fix(ai): quoting * fix(ai): use awshelper for access to aws commands * fix(ai): move files to correct location * fix(ai): only get folder * fix(ai): fix sync * fix(ai): clear folder before syncing * fix(ai): update bucket contents every roll for updates * feat(ai): support TSV loading from manifest config * fix(ai): fix init so aws syncing is done with awshelper image and loading into vectorstore is with service image * fix(ai): fix loading * fix(ai): fix loading * fix(ai): sync all files * feat(ai): add google secret loading and mounting * fix(ai): mount to container, not inits * fix(mount): don't create another dir * fix(mount): don't create another dir * fix(mounts): fix paths * fix(mounts): mount all secrets * fix(secrets): allow .env file to be a secret * fix(secrets): revert failed attempt to support .env * chore(ai): cd to dir with pyproject.toml * chore(ai): try to fix issue with pyproject.toml * fix(ai): actually we need to poetry run * chore(ai): debug lines * chore(ai): debug lines * chore(ai): debug lines * chore(ai): debug lines * chore(ai): debug lines * 
fix(mount): don't overwrite whole dir * fix(ai): mounts * chore(ai): remove debug lines * fix(ai): remove debug * chore(debug): debug line * chore(debug): remove debug line * feat(ai): add to roll all, fix port in service yaml * fix(ai): fix nginx conf file name * fix(nginx): fix routing for AI service to add trailing slash after "ai" * Update web_whitelist * Update kube-setup-gen3-discovery-ai.sh * Update README.md * Update gen3-discovery-ai-deploy.yaml * Update gen3-discovery-ai-deploy.yaml * Update gen3-discovery-ai-service.yaml * Update kube-setup-gen3-discovery-ai.sh * feat(discovery): update to data load commands and strategy to support markdown --- gen3/bin/kube-roll-all.sh | 6 + gen3/bin/kube-setup-gen3-discovery-ai.sh | 154 +++++++++++++++ kube/services/gen3-discovery-ai/README.md | 42 ++++ .../gen3-discovery-ai-deploy.yaml | 181 ++++++++++++++++++ .../gen3-discovery-ai-service.yaml | 21 ++ .../gen3-discovery-ai-service.conf | 12 ++ 6 files changed, 416 insertions(+) create mode 100644 gen3/bin/kube-setup-gen3-discovery-ai.sh create mode 100644 kube/services/gen3-discovery-ai/README.md create mode 100644 kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml create mode 100644 kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml create mode 100644 kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 6a67f2bdd..1dca87c68 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -243,6 +243,12 @@ else gen3_log_info "not deploying dicom-viewer - no manifest entry for '.versions[\"dicom-viewer\"]'" fi +if g3k_manifest_lookup '.versions["gen3-discovery-ai"]' 2> /dev/null; then + gen3 kube-setup-gen3-discovery-ai & +else + gen3_log_info "not deploying gen3-discovery-ai - no manifest entry for '.versions[\"gen3-discovery-ai\"]'" +fi + if g3k_manifest_lookup '.versions["ohdsi-atlas"]' && g3k_manifest_lookup '.versions["ohdsi-webapi"]' 2> 
/dev/null; then gen3 kube-setup-ohdsi & else diff --git a/gen3/bin/kube-setup-gen3-discovery-ai.sh b/gen3/bin/kube-setup-gen3-discovery-ai.sh new file mode 100644 index 000000000..44a472a74 --- /dev/null +++ b/gen3/bin/kube-setup-gen3-discovery-ai.sh @@ -0,0 +1,154 @@ +#!/bin/bash +# +# Deploy the gen3-discovery-ai service +# + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/gen3setup" + +# NOTE: no db for this service yet, but we'll likely need it in the future +setup_database() { + gen3_log_info "setting up gen3-discovery-ai service ..." + + if g3kubectl describe secret gen3-discovery-ai-g3auto > /dev/null 2>&1; then + gen3_log_info "gen3-discovery-ai-g3auto secret already configured" + return 0 + fi + if [[ -n "$JENKINS_HOME" || ! -f "$(gen3_secrets_folder)/creds.json" ]]; then + gen3_log_err "skipping db setup in non-adminvm environment" + return 0 + fi + # Setup .env file that gen3-discovery-ai service consumes + if [[ ! -f "$secretsFolder/gen3-discovery-ai.env" || ! -f "$secretsFolder/base64Authz.txt" ]]; then + local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + if ! gen3 db setup gen3-discovery-ai; then + gen3_log_err "Failed setting up database for gen3-discovery-ai service" + return 1 + fi + fi + if [[ ! -f "$secretsFolder/dbcreds.json" ]]; then + gen3_log_err "dbcreds not present in Gen3Secrets/" + return 1 + fi + + # go ahead and rotate the password whenever we regen this file + local password="$(gen3 random)" + cat - > "$secretsFolder/gen3-discovery-ai.env" < "$secretsFolder/base64Authz.txt" + fi + gen3 secrets sync 'setup gen3-discovery-ai-g3auto secrets' +} + +if ! g3k_manifest_lookup '.versions."gen3-discovery-ai"' 2> /dev/null; then + gen3_log_info "kube-setup-gen3-discovery-ai exiting - gen3-discovery-ai service not in manifest" + exit 0 +fi + +# There's no db for this service *yet* +# +# if ! 
setup_database; then +# gen3_log_err "kube-setup-gen3-discovery-ai bailing out - database failed setup" +# exit 1 +# fi + +setup_storage() { + local saName="gen3-discovery-ai-sa" + g3kubectl create sa "$saName" > /dev/null 2>&1 || true + + local secret + local secretsFolder="$(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + + secret="$(g3kubectl get secret gen3-discovery-ai-g3auto -o json 2> /dev/null)" + local hasStorageCfg + hasStorageCfg=$(jq -r '.data | has("storage_config.json")' <<< "$secret") + + if [ "$hasStorageCfg" = "false" ]; then + gen3_log_info "setting up storage for gen3-discovery-ai service" + # + # gen3-discovery-ai-g3auto secret still does not exist + # we need to setup an S3 bucket and IAM creds + # let's avoid creating multiple buckets for different + # deployments to the same k8s cluster (dev, etc) + # + local bucketName + local accountNumber + local environment + + if ! accountNumber="$(aws sts get-caller-identity --output text --query 'Account')"; then + gen3_log_err "could not determine account numer" + return 1 + fi + + gen3_log_info "accountNumber: ${accountNumber}" + + if ! environment="$(g3kubectl get configmap manifest-global -o json | jq -r .data.environment)"; then + gen3_log_err "could not determine environment from manifest-global - bailing out of gen3-discovery-ai setup" + return 1 + fi + + gen3_log_info "environment: ${environment}" + + # try to come up with a unique but composable bucket name + bucketName="gen3-discovery-ai-${accountNumber}-${environment//_/-}" + + gen3_log_info "bucketName: ${bucketName}" + + if aws s3 ls --page-size 1 "s3://${bucketName}" > /dev/null 2>&1; then + gen3_log_info "${bucketName} s3 bucket already exists - probably in use by another namespace - copy the creds from there to $(gen3_secrets_folder)/g3auto/gen3-discovery-ai" + # continue on ... + elif ! 
gen3 s3 create "${bucketName}"; then + gen3_log_err "maybe failed to create bucket ${bucketName}, but maybe not, because the terraform script is flaky" + fi + + local hostname + hostname="$(gen3 api hostname)" + jq -r -n --arg bucket "${bucketName}" --arg hostname "${hostname}" '.bucket=$bucket | .prefix=$hostname' > "${secretsFolder}/storage_config.json" + gen3 secrets sync 'setup gen3-discovery-ai credentials' + + local roleName + roleName="$(gen3 api safe-name gen3-discovery-ai)" || return 1 + + if ! gen3 awsrole info "$roleName" > /dev/null; then # setup role + bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || return 1 + gen3 awsrole create "$roleName" "$saName" || return 1 + gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${roleName}" + # try to give the gitops role read/write permissions on the bucket + local gitopsRoleName + gitopsRoleName="$(gen3 api safe-name gitops)" + gen3 s3 attach-bucket-policy "$bucketName" --read-write --role-name "${gitopsRoleName}" + fi + fi + + return 0 +} + +if ! 
setup_storage; then + gen3_log_err "kube-setup-gen3-discovery-ai bailing out - storage failed setup" + exit 1 +fi + +gen3_log_info "Setup complete, syncing configuration to bucket" + +bucketName="$( (gen3 secrets decode 'gen3-discovery-ai-g3auto' 'storage_config.json' || echo ERROR) | jq -r .bucket)" || exit 1 +aws s3 sync "$(dirname $(g3k_manifest_path))/gen3-discovery-ai/knowledge" "s3://$bucketName" --delete + +gen3 roll gen3-discovery-ai +g3kubectl apply -f "${GEN3_HOME}/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml" + +if [[ -z "$GEN3_ROLL_ALL" ]]; then + gen3 kube-setup-networkpolicy + gen3 kube-setup-revproxy +fi + +gen3_log_info "The gen3-discovery-ai service has been deployed onto the kubernetes cluster" +gen3_log_info "test with: curl https://commons-host/ai" diff --git a/kube/services/gen3-discovery-ai/README.md b/kube/services/gen3-discovery-ai/README.md new file mode 100644 index 000000000..4c20678e0 --- /dev/null +++ b/kube/services/gen3-discovery-ai/README.md @@ -0,0 +1,42 @@ +# Gen3 Discovery AI Configuration + +Expects data in a `gen3-discovery-ai` folder relative to +where the `manifest.json` is. + +Basic setup: + +`{{dir where manifest.json is}}/gen3-discovery-ai/knowledge/` + +- `tsvs` folder + - tsvs with topic_name at beginning of file +- `markdown` folder + - {{topic_name_1}} + - markdown file(s) + - {{topic_name_2}} + - markdown file(s) + +The `kube-setup-gen3-discovery-ai` script syncs the above `/knowledge` folder to +an S3 bucket. The service configuration then pulls from the S3 bucket and runs load commands +to get the data into chromadb. + +> Note: See the `gen3-discovery-ai` service repo docs and README for more details on data load capabilities. + +Check the `gen3-discovery-ai-deploy.yaml` for what commands are being run in the automation. 
+ +Expects secrets setup in `g3auto/gen3-discovery-ai` folder + - `credentials.json`: Google service account key if using a topic with Google Vertex AI + - `env`: .env file contents for service configuration (see service repo for a default one) + +## Populating Disk for In-Memory Vectordb Chromadb + +In order to setup pre-configured topics, we need to load a bunch of data +into Chromadb (which is an in-mem vectordb with an option to persist to disk). + +To load topics consistently, we setup an S3 bucket to house the persisted +data for the vectordb. + +### Getting data from S3 in mem + +We specify a path for Chromadb to use for persisted data and when it sees +data there, it loads it in. So the deployment automation: 1. aws syncs the bucket +and then 2. calls a script to load the files into the in-mem vectorstore from there. diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml new file mode 100644 index 000000000..dcfe03248 --- /dev/null +++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml @@ -0,0 +1,181 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: gen3-discovery-ai-deployment +spec: + selector: + # Only select pods based on the 'app' label + matchLabels: + app: gen3-discovery-ai + release: production + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + template: + metadata: + labels: + app: gen3-discovery-ai + release: production + GEN3_DATE_LABEL + spec: + serviceAccountName: gen3-discovery-ai-sa + volumes: + - name: gen3-discovery-ai-g3auto-volume + secret: + secretName: gen3-discovery-ai-g3auto + - name: gen3-discovery-ai-knowledge-library-volume + emptyDir: {} + initContainers: + # chromadb's persisted disk support requires the ability to write. We don't technically need this ability + # since we're populating the entirety of the database from configured files (no live updates). 
+ # + # Solution: utilize emptyDir as a writable space. + # + # Procedure: in init containers, copy files from s3 to writable + # temporary space in emptyDir, use files from writable space + # to load into knowledge libary, move final knowledge library + # files into top-level emptyDir and make available in final container + - name: gen3-discovery-ai-aws-init + GEN3_AWSHELPER_IMAGE|-image: quay.io/cdis/awshelper:master-| + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + volumeMounts: + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/.env + subPath: env + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/credentials.json + subPath: credentials.json + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/storage_config.json + subPath: storage_config.json + - name: gen3-discovery-ai-knowledge-library-volume + mountPath: /gen3discoveryai/knowledge + imagePullPolicy: Always + resources: + requests: + cpu: 1 + limits: + cpu: 2 + memory: 512Mi + command: ["/bin/bash"] + args: + - "-c" + - | + bucketName=$(grep -o "\"bucket\": *\"[^\"]*\"" /gen3discoveryai/storage_config.json | awk -F'"' '{print $4}') + echo BUCKET: "$bucketName" + echo + echo BEFORE /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + echo + echo syncing from s3 + aws s3 sync "s3://${bucketName}" "/gen3discoveryai/knowledge/tmp" + echo + echo AFTER /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + - name: gen3-discovery-ai-knowledge-init + GEN3_GEN3-DISCOVERY-AI_IMAGE + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: ANONYMIZED_TELEMETRY + value: "False" + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /gen3discoveryai/credentials.json + volumeMounts: + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + 
mountPath: /gen3discoveryai/.env + subPath: env + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/credentials.json + subPath: credentials.json + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/storage_config.json + subPath: storage_config.json + - name: gen3-discovery-ai-knowledge-library-volume + mountPath: /gen3discoveryai/knowledge + imagePullPolicy: Always + resources: + requests: + cpu: 1 + limits: + cpu: 2 + memory: 512Mi + command: ["/bin/bash"] + args: + - "-c" + - | + echo + echo BEFORE /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + echo running load_into_knowledge_store.py + poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py tsvs /gen3discoveryai/knowledge/tmp/tsvs + + if [ -d "/gen3discoveryai/knowledge/tmp/markdown" ]; then + for dir in "/gen3discoveryai/knowledge/tmp/markdown"/*; do + if [ -d "$dir" ]; then + dir_name=$(basename "$dir") + + echo "Processing directory: $dir_name. 
Full path: $dir" + poetry run python /gen3discoveryai/bin/load_into_knowledge_store.py markdown --topic $dir_name $dir + fi + done + else + echo "Not syncing markdown, directory not found: /gen3discoveryai/knowledge/tmp/markdown" + fi + + rm -r /gen3discoveryai/knowledge/tmp/ + echo + echo AFTER /gen3discoveryai/knowledge + ls -Ra /gen3discoveryai/knowledge + containers: + - name: gen3-discovery-ai + GEN3_GEN3-DISCOVERY-AI_IMAGE + imagePullPolicy: Always + ports: + - containerPort: 8080 + env: + - name: GEN3_DEBUG + GEN3_DEBUG_FLAG|-value: "False"-| + - name: ANONYMIZED_TELEMETRY + value: "False" + - name: GOOGLE_APPLICATION_CREDENTIALS + value: /gen3discoveryai/credentials.json + volumeMounts: + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/.env + subPath: env + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/credentials.json + subPath: credentials.json + - name: gen3-discovery-ai-g3auto-volume + readOnly: true + mountPath: /gen3discoveryai/storage_config.json + subPath: storage_config.json + - name: gen3-discovery-ai-knowledge-library-volume + mountPath: /gen3discoveryai/knowledge + imagePullPolicy: Always + resources: + requests: + cpu: 1 + limits: + cpu: 2 + # NOTE: If the configured data for the knowledge library (vector database) is large, you may need to bump this + memory: 512Mi diff --git a/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml new file mode 100644 index 000000000..b4734c3b8 --- /dev/null +++ b/kube/services/gen3-discovery-ai/gen3-discovery-ai-service.yaml @@ -0,0 +1,21 @@ +kind: Service +apiVersion: v1 +metadata: + name: gen3-discovery-ai-service +spec: + selector: + app: gen3-discovery-ai + release: production + ports: + - protocol: TCP + port: 80 + targetPort: 8089 + name: http + nodePort: null + - protocol: TCP + port: 443 + targetPort: 443 + name: https + nodePort: null + type: 
ClusterIP + diff --git a/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf new file mode 100644 index 000000000..42e9a3758 --- /dev/null +++ b/kube/services/revproxy/gen3.nginx.conf/gen3-discovery-ai-service.conf @@ -0,0 +1,12 @@ + location /ai { + if ($csrf_check !~ ^ok-\S.+$) { + return 403 "failed csrf check"; + } + + set $proxy_service "gen3-discovery-ai-service"; + set $upstream http://gen3-discovery-ai-service$des_domain; + rewrite ^/ai/(.*) /$1 break; + proxy_pass $upstream; + proxy_redirect http://$host/ https://$host/ai/; + client_max_body_size 0; + } From 077e475463098416001ebd3ced41ec06d1ee631b Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 6 Mar 2024 15:21:16 -0500 Subject: [PATCH 305/362] Raising requests for Jenkins pods (#2495) --- .secrets.baseline | 2 +- Jenkinsfile | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index b7e06622d..fbed122fd 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -3,7 +3,7 @@ "files": "^.secrets.baseline$", "lines": null }, - "generated_at": "2024-02-23T20:30:41Z" + "generated_at": "2024-02-23T20:30:41Z", "plugins_used": [ { "name": "AWSKeyDetector" diff --git a/Jenkinsfile b/Jenkinsfile index 4e3470ded..9c70a2e37 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -99,8 +99,8 @@ spec: resources: requests: cpu: 0.2 - memory: 200Mi - ephemeral-storage: 200Mi + memory: 400Mi + ephemeral-storage: 1Gi env: - name: AWS_DEFAULT_REGION value: us-east-1 From 62894388c1ac907587916462d641788c87226e4a Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Thu, 7 Mar 2024 10:42:36 -0600 Subject: [PATCH 306/362] fix cedar ingestion problems (#2494) * fix cedar ingestion problems * add default falue to get statement --------- Co-authored-by: Mingfei Shao <2475897+mfshao@users.noreply.github.com> --- .secrets.baseline | 360 
+++++------------- .../healdata/heal-cedar-data-ingest.py | 101 +++++ 2 files changed, 187 insertions(+), 274 deletions(-) diff --git a/.secrets.baseline b/.secrets.baseline index fbed122fd..200b69841 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -1,9 +1,9 @@ { "exclude": { - "files": "^.secrets.baseline$", + "files": null, "lines": null }, - "generated_at": "2024-02-23T20:30:41Z", + "generated_at": "2024-03-04T21:42:56Z", "plugins_used": [ { "name": "AWSKeyDetector" @@ -61,14 +61,12 @@ "Chef/repo/data_bags/README.md": [ { "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", - "is_secret": false, "is_verified": false, "line_number": 45, "type": "Secret Keyword" }, { "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", - "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -77,25 +75,22 @@ "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, - "line_number": 121, + "line_number": 124, "type": "Secret Keyword" } ], "Docker/jenkins/Jenkins-Worker/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, - "line_number": 143, + "line_number": 139, "type": "Secret Keyword" } ], "Docker/jenkins/Jenkins/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 107, "type": "Secret Keyword" @@ -104,7 +99,6 @@ "Docker/jenkins/Jenkins2/Dockerfile": [ { "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", - "is_secret": false, "is_verified": false, "line_number": 108, "type": "Secret Keyword" @@ -113,7 +107,6 @@ "Docker/sidecar/service.key": [ { "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", - "is_secret": false, "is_verified": false, "line_number": 1, "type": "Private Key" @@ -122,7 +115,6 @@ "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { 
"hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Secret Keyword" @@ -131,7 +123,6 @@ "ansible/roles/awslogs/defaults/main.yaml": [ { "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Basic Auth Credentials" @@ -140,14 +131,12 @@ "ansible/roles/slurm/README.md": [ { "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Base64 High Entropy String" }, { "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" @@ -156,7 +145,6 @@ "apis_configs/config_helper.py": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, "is_verified": false, "line_number": 66, "type": "Basic Auth Credentials" @@ -165,7 +153,6 @@ "apis_configs/fence_credentials.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -174,21 +161,18 @@ "apis_configs/fence_settings.py": [ { "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Basic Auth Credentials" }, { "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", - "is_secret": false, "is_verified": false, "line_number": 58, "type": "Secret Keyword" }, { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 80, "type": "Basic Auth Credentials" @@ -197,7 +181,6 @@ "apis_configs/indexd_settings.py": [ { "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Basic Auth Credentials" @@ -206,7 +189,6 @@ "apis_configs/peregrine_settings.py": [ { "hashed_secret": 
"347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -215,7 +197,6 @@ "apis_configs/sheepdog_settings.py": [ { "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", - "is_secret": false, "is_verified": false, "line_number": 46, "type": "Basic Auth Credentials" @@ -224,7 +205,6 @@ "doc/Gen3-data-upload.md": [ { "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -233,7 +213,6 @@ "doc/api.md": [ { "hashed_secret": "625de83a7517422051911680cc803921ff99db90", - "is_secret": false, "is_verified": false, "line_number": 47, "type": "Hex High Entropy String" @@ -242,28 +221,24 @@ "doc/gen3OnK8s.md": [ { "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", - "is_secret": false, "is_verified": false, "line_number": 113, "type": "Secret Keyword" }, { "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", - "is_secret": false, "is_verified": false, "line_number": 143, "type": "Secret Keyword" }, { "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", - "is_secret": false, "is_verified": false, "line_number": 170, "type": "Secret Keyword" }, { "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", - "is_secret": false, "is_verified": false, "line_number": 189, "type": "Secret Keyword" @@ -272,7 +247,6 @@ "doc/kube-setup-data-ingestion-job.md": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Secret Keyword" @@ -281,7 +255,6 @@ "doc/logs.md": [ { "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Secret Keyword" @@ -290,7 +263,6 @@ "doc/slurm_cluster.md": [ { "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", - "is_secret": false, "is_verified": false, "line_number": 184, "type": 
"Secret Keyword" @@ -299,14 +271,12 @@ "files/dashboard/usage-reports/package-lock.json": [ { "hashed_secret": "e095101882f706c4de95e0f75c5bcb9666e3f448", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "5422e4f96964d5739998b25ac214520c1b113e5b", - "is_secret": false, "is_verified": false, "line_number": 15, "type": "Base64 High Entropy String" @@ -315,14 +285,12 @@ "gen3/bin/api.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 407, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 477, "type": "Secret Keyword" @@ -331,7 +299,6 @@ "gen3/bin/kube-dev-namespace.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 135, "type": "Secret Keyword" @@ -340,7 +307,6 @@ "gen3/bin/kube-setup-argo.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 206, "type": "Secret Keyword" @@ -349,7 +315,6 @@ "gen3/bin/kube-setup-aurora-monitoring.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Secret Keyword" @@ -358,7 +323,6 @@ "gen3/bin/kube-setup-certs.sh": [ { "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", - "is_secret": false, "is_verified": false, "line_number": 50, "type": "Secret Keyword" @@ -367,14 +331,12 @@ "gen3/bin/kube-setup-dashboard.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 40, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 41, "type": "Secret Keyword" @@ -383,14 +345,12 @@ 
"gen3/bin/kube-setup-data-ingestion-job.sh": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" }, { "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", - "is_secret": false, "is_verified": false, "line_number": 102, "type": "Secret Keyword" @@ -399,7 +359,6 @@ "gen3/bin/kube-setup-dicom-server.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 43, "type": "Secret Keyword" @@ -408,23 +367,48 @@ "gen3/bin/kube-setup-dicom.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" } ], + "gen3/bin/kube-setup-gen3-discovery-ai.sh": [ + { + "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "is_verified": false, + "line_number": 37, + "type": "Secret Keyword" + }, + { + "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "is_verified": false, + "line_number": 71, + "type": "Secret Keyword" + } + ], "gen3/bin/kube-setup-jenkins.sh": [ { "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, + "is_verified": false, + "line_number": 22, + "type": "Secret Keyword" + } + ], + "gen3/bin/kube-setup-jenkins2.sh": [ + { + "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", + "is_verified": false, + "line_number": 18, + "type": "Secret Keyword" + }, + { + "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", "is_verified": false, "line_number": 22, "type": "Secret Keyword" @@ -433,7 +417,6 @@ "gen3/bin/kube-setup-metadata.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 35, "type": "Secret Keyword" @@ 
-442,21 +425,18 @@ "gen3/bin/kube-setup-revproxy.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 38, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 55, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 57, "type": "Secret Keyword" @@ -465,21 +445,18 @@ "gen3/bin/kube-setup-secrets.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 79, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 82, "type": "Secret Keyword" }, { "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", - "is_secret": false, "is_verified": false, "line_number": 95, "type": "Secret Keyword" @@ -488,14 +465,12 @@ "gen3/bin/kube-setup-sftp.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 36, "type": "Secret Keyword" }, { "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", - "is_secret": false, "is_verified": false, "line_number": 51, "type": "Secret Keyword" @@ -504,7 +479,6 @@ "gen3/bin/kube-setup-sheepdog.sh": [ { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 33, "type": "Secret Keyword" @@ -513,28 +487,24 @@ "gen3/bin/kube-setup-sower-jobs.sh": [ { "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", - "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" }, { "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": 
"d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 120, "type": "Secret Keyword" }, { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 122, "type": "Secret Keyword" @@ -543,21 +513,18 @@ "gen3/bin/kube-setup-ssjdispatcher.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 117, "type": "Secret Keyword" }, { "hashed_secret": "7992309146efaa8da936e34b0bd33242cd0e9f93", - "is_secret": false, "is_verified": false, "line_number": 184, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 197, "type": "Secret Keyword" @@ -566,14 +533,12 @@ "gen3/lib/aws.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 640, "type": "Secret Keyword" }, { "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", - "is_secret": false, "is_verified": false, "line_number": 660, "type": "Secret Keyword" @@ -582,14 +547,12 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", - "is_secret": false, "is_verified": false, "line_number": 33, "type": "Basic Auth Credentials" }, { "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", - "is_secret": false, "is_verified": false, "line_number": 286, "type": "Secret Keyword" @@ -598,7 +561,6 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" @@ -607,7 +569,6 @@ "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": 
false, "line_number": 5, "type": "Secret Keyword" @@ -616,7 +577,6 @@ "gen3/lib/logs/utils.sh": [ { "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", - "is_secret": false, "is_verified": false, "line_number": 3, "type": "Secret Keyword" @@ -625,7 +585,6 @@ "gen3/lib/manifestDefaults/hatchery/hatchery.json": [ { "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -634,14 +593,12 @@ "gen3/lib/onprem.sh": [ { "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", - "is_secret": false, "is_verified": false, "line_number": 68, "type": "Secret Keyword" }, { "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", - "is_secret": false, "is_verified": false, "line_number": 84, "type": "Secret Keyword" @@ -650,14 +607,12 @@ "gen3/lib/secrets/rotate-postgres.sh": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", - "is_secret": false, "is_verified": false, "line_number": 250, "type": "Secret Keyword" @@ -666,49 +621,42 @@ "gen3/lib/testData/etlconvert/expected2.yaml": [ { "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" }, { "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", - "is_secret": false, "is_verified": false, "line_number": 13, "type": "Base64 High Entropy String" }, { "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", - "is_secret": false, "is_verified": false, "line_number": 16, "type": "Base64 High Entropy String" }, { "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", - 
"is_secret": false, "is_verified": false, "line_number": 33, "type": "Base64 High Entropy String" }, { "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", - "is_secret": false, "is_verified": false, "line_number": 35, "type": "Base64 High Entropy String" }, { "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", - "is_secret": false, "is_verified": false, "line_number": 36, "type": "Base64 High Entropy String" @@ -717,7 +665,6 @@ "gen3/test/secretsTest.sh": [ { "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", - "is_secret": false, "is_verified": false, "line_number": 25, "type": "Secret Keyword" @@ -726,28 +673,24 @@ "gen3/test/terraformTest.sh": [ { "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 156, "type": "Secret Keyword" }, { "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", - "is_secret": false, "is_verified": false, "line_number": 172, "type": "Base64 High Entropy String" }, { "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", - "is_secret": false, "is_verified": false, "line_number": 175, "type": "Base64 High Entropy String" }, { "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", - "is_secret": false, "is_verified": false, "line_number": 175, "type": "Secret Keyword" @@ -756,21 +699,18 @@ "kube/services/argocd/values.yaml": [ { "hashed_secret": "27c6929aef41ae2bcadac15ca6abcaff72cda9cd", - "is_secret": false, "is_verified": false, "line_number": 360, "type": "Private Key" }, { "hashed_secret": "edbd5e119f94badb9f99a67ac6ff4c7a5204ad61", - "is_secret": false, "is_verified": false, "line_number": 379, "type": "Secret Keyword" }, { "hashed_secret": "91dfd9ddb4198affc5c194cd8ce6d338fde470e2", - "is_secret": false, "is_verified": false, "line_number": 412, "type": "Secret Keyword" @@ -779,7 +719,6 @@ "kube/services/datadog/values.yaml": [ { "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", - "is_secret": false, 
"is_verified": false, "line_number": 23, "type": "Secret Keyword" @@ -788,401 +727,362 @@ "kube/services/fenceshib/fenceshib-configmap.yaml": [ { "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", - "is_secret": false, "is_verified": false, "line_number": 375, "type": "Base64 High Entropy String" }, { "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", - "is_secret": false, "is_verified": false, "line_number": 376, "type": "Base64 High Entropy String" }, { "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", - "is_secret": false, "is_verified": false, "line_number": 377, "type": "Base64 High Entropy String" }, { "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", - "is_secret": false, "is_verified": false, "line_number": 378, "type": "Base64 High Entropy String" }, { "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", - "is_secret": false, "is_verified": false, "line_number": 379, "type": "Base64 High Entropy String" }, { "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", - "is_secret": false, "is_verified": false, "line_number": 380, "type": "Base64 High Entropy String" }, { "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12", - "is_secret": false, "is_verified": false, "line_number": 381, "type": "Base64 High Entropy String" }, { "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", - "is_secret": false, "is_verified": false, "line_number": 382, "type": "Base64 High Entropy String" }, { "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", - "is_secret": false, "is_verified": false, "line_number": 383, "type": "Base64 High Entropy String" }, { "hashed_secret": "dadd9b96636f9529f2547d05d754dc310ceba0c3", - "is_secret": false, "is_verified": false, "line_number": 384, "type": "Base64 High Entropy String" }, { "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", - "is_secret": false, "is_verified": false, "line_number": 385, "type": "Base64 High Entropy String" }, { "hashed_secret": 
"823131319b4c4b4688f44d3e832bfa9696f16b52", - "is_secret": false, "is_verified": false, "line_number": 386, "type": "Base64 High Entropy String" }, { "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", - "is_secret": false, "is_verified": false, "line_number": 387, "type": "Base64 High Entropy String" }, { "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", - "is_secret": false, "is_verified": false, "line_number": 388, "type": "Base64 High Entropy String" }, { "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", - "is_secret": false, "is_verified": false, "line_number": 389, "type": "Base64 High Entropy String" }, { "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", - "is_secret": false, "is_verified": false, "line_number": 390, "type": "Base64 High Entropy String" }, { "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", - "is_secret": false, "is_verified": false, "line_number": 391, "type": "Base64 High Entropy String" }, { "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", - "is_secret": false, "is_verified": false, "line_number": 392, "type": "Base64 High Entropy String" }, { "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", - "is_secret": false, "is_verified": false, "line_number": 393, "type": "Base64 High Entropy String" }, { "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", - "is_secret": false, "is_verified": false, "line_number": 394, "type": "Base64 High Entropy String" }, { "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", - "is_secret": false, "is_verified": false, "line_number": 395, "type": "Base64 High Entropy String" }, { "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", - "is_secret": false, "is_verified": false, "line_number": 396, "type": "Base64 High Entropy String" }, { "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", - "is_secret": false, "is_verified": false, "line_number": 397, "type": "Base64 High Entropy String" }, { 
"hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", - "is_secret": false, "is_verified": false, "line_number": 398, "type": "Base64 High Entropy String" }, { "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", - "is_secret": false, "is_verified": false, "line_number": 399, "type": "Base64 High Entropy String" }, { - "hashed_secret": "7803ae08cdc22a5e0b025eff3c9ef0628eedc165", - "is_secret": false, + "hashed_secret": "fbad0bc8f7792b03f89cd3780eb7cf79f284c525", "is_verified": false, "line_number": 419, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b8b61e87f5b58b0eeb597b2122ea0cea2ccab3d9", - "is_secret": false, + "hashed_secret": "3f6480956a775dacb44e2c39aa3d4722a347f7ab", "is_verified": false, "line_number": 420, "type": "Base64 High Entropy String" }, { - "hashed_secret": "787745fc904c3bd7eddc3d1aab683a376c13890f", - "is_secret": false, + "hashed_secret": "17f32ae55b14d708ca121722c2cae37189f19daf", "is_verified": false, "line_number": 423, "type": "Base64 High Entropy String" }, { - "hashed_secret": "81361d672f238f505a6246ef9b655ee2f48d67e7", - "is_secret": false, + "hashed_secret": "08a74689ca077515d406093720a7e5675fb42bb8", "is_verified": false, "line_number": 424, "type": "Base64 High Entropy String" }, { - "hashed_secret": "7c98bff76ac3f273d15ed9bc3dd5294d323ab577", - "is_secret": false, + "hashed_secret": "fa577bb3b2600d2d522dcfea8f1e34896760fcf2", "is_verified": false, "line_number": 425, "type": "Base64 High Entropy String" }, { - "hashed_secret": "46038fc88daceed8dd46817ca45c72ae0270fdd4", - "is_secret": false, + "hashed_secret": "37254f15cca211a1bd5f7ceb23de2b3eb8fb33aa", "is_verified": false, "line_number": 426, "type": "Base64 High Entropy String" }, { - "hashed_secret": "acad0c57b4f5cbed1b4863ed06d02784180a9f92", - "is_secret": false, + "hashed_secret": "86865593e038509467b91c2d5f36ccc09c3f422b", "is_verified": false, "line_number": 427, "type": "Base64 High Entropy String" }, { - "hashed_secret": 
"1b57f49a6ee337c16ecd6aabfc0dff3b3821cd09", - "is_secret": false, + "hashed_secret": "a899a8d9e114b2a8e108f90e6a72c056db22489f", "is_verified": false, "line_number": 428, "type": "Base64 High Entropy String" }, { - "hashed_secret": "5b688158be36e8b3f265a462ed599dcf69290084", - "is_secret": false, + "hashed_secret": "756b4825f886afd83c25563ac9d45f318d695c48", "is_verified": false, "line_number": 429, "type": "Base64 High Entropy String" }, { - "hashed_secret": "965996e12c8b50b3c325d96003e8984a4ece658a", - "is_secret": false, + "hashed_secret": "89882eeb0aca97717a7e4afcf4bc08d077813c7f", "is_verified": false, "line_number": 430, "type": "Base64 High Entropy String" }, { - "hashed_secret": "584f0c58e764e948af1a35c9e60447aa0f84c6f5", - "is_secret": false, + "hashed_secret": "347140d7b7ceb4e501c3c9c2ea4f29338e2f145e", "is_verified": false, "line_number": 431, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bcaf897786d060a675ee9d654a84ae8baf96e9d0", - "is_secret": false, + "hashed_secret": "61dbf70eb10d609e60c7b87faf8f755ff48abc46", "is_verified": false, "line_number": 432, "type": "Base64 High Entropy String" }, { - "hashed_secret": "0c09277fa183e06d32065f9386a3b4190b445df3", - "is_secret": false, + "hashed_secret": "24cd54c4b2f58378bba008cb2df68ac663fba7c8", "is_verified": false, "line_number": 433, "type": "Base64 High Entropy String" }, { - "hashed_secret": "5a51be06b305d6664e4afd25f21869b0f8b5039b", - "is_secret": false, + "hashed_secret": "fa4f9626ae4b98f4b61203c5bafb6f21c9c31e5d", "is_verified": false, "line_number": 434, "type": "Base64 High Entropy String" }, { - "hashed_secret": "b38404f8853d734e3d03577b2c1084b4540c8708", - "is_secret": false, + "hashed_secret": "b1370003d9cc1e346c83dba33e0418c7775a0c15", "is_verified": false, "line_number": 435, "type": "Base64 High Entropy String" }, { - "hashed_secret": "126ccc602cffcb8292beb57137f7f6719e317b72", - "is_secret": false, + "hashed_secret": "c66526e195e423a7ba7d68ac661cdcd8600dcd1f", "is_verified": 
false, "line_number": 436, "type": "Base64 High Entropy String" }, { - "hashed_secret": "6681c1d7e1d327642a32cb8864ad51e4b8f981e5", - "is_secret": false, + "hashed_secret": "d29d7044f0944eb30e02cf445f6998e3343dd811", "is_verified": false, "line_number": 437, "type": "Base64 High Entropy String" }, { - "hashed_secret": "7f7b1f316ece195e5f584fe2faf6f9edc6942c6f", - "is_secret": false, + "hashed_secret": "80a869460f33722387d8d58e7d9d2e1bbd5d1fe1", + "is_verified": false, + "line_number": 438, + "type": "Base64 High Entropy String" + }, + { + "hashed_secret": "4a06e2a02cbc665adccb4162dc57836895da65b8", "is_verified": false, "line_number": 439, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bb908c7bc655057f2edc42815c5dff82e9dea529", - "is_secret": false, + "hashed_secret": "ba2549f35835dfa101d3f660f7604dc78e3e226f", "is_verified": false, "line_number": 440, "type": "Base64 High Entropy String" }, { - "hashed_secret": "bc2a0d18e3dd142df7b34e95342d47bf8aadabcb", - "is_secret": false, + "hashed_secret": "f354d4ee5fdb94ad29c7b3600264467f45b80eaa", "is_verified": false, "line_number": 441, "type": "Base64 High Entropy String" }, { - "hashed_secret": "d60f0bcea109bb6edb6e45fd387f5f2c86e49e1a", - "is_secret": false, + "hashed_secret": "bf17b587868ba7c3db9865b114261b5b8f1df870", "is_verified": false, "line_number": 442, "type": "Base64 High Entropy String" }, { - "hashed_secret": "e549dd40a741557cc1c4e377df0a141354e22688", - "is_secret": false, + "hashed_secret": "de1fd7a0d32cba528b4d80818c6601f2588d5383", "is_verified": false, "line_number": 443, "type": "Base64 High Entropy String" }, { - "hashed_secret": "2dd2486dae84cad50387c20bf687b6fbc6162b58", - "is_secret": false, + "hashed_secret": "bcad65055f6de654541db2bf27d4e27bd54d94c7", "is_verified": false, "line_number": 444, "type": "Base64 High Entropy String" }, { - "hashed_secret": "71622010fc7eb09d9273f59c548bde6a5da5dc0e", - "is_secret": false, + "hashed_secret": "f2e16f2dd532f65f79341342fdf57a093fc408d8", 
"is_verified": false, "line_number": 445, "type": "Base64 High Entropy String" }, { - "hashed_secret": "6f0115cf53bd49ec990c562ac6cbfc452c83cd46", - "is_secret": false, + "hashed_secret": "bb036a679a7d2df9fd2ca57068a446bf7f7dd106", "is_verified": false, "line_number": 446, "type": "Base64 High Entropy String" }, { - "hashed_secret": "70dddd534b2f9bb70871fefe0845b79c3b69363f", - "is_secret": false, + "hashed_secret": "5aa6568b1e8185578a6e964f5c322783ad349554", + "is_verified": false, + "line_number": 447, + "type": "Base64 High Entropy String" + }, + { + "hashed_secret": "4d14835ff0b0bf5aad480296cb705c74ac65f413", "is_verified": false, "line_number": 448, "type": "Base64 High Entropy String" }, { - "hashed_secret": "acf3536b0416aa99608b0be17e87655370ece829", - "is_secret": false, + "hashed_secret": "3f23f77dcf454ad73c4d61c44fd9aa584ef946c1", "is_verified": false, - "line_number": 449, + "line_number": 451, "type": "Base64 High Entropy String" }, { - "hashed_secret": "1d13ee35c7279c1fae1c6474ed47611994273e41", - "is_secret": false, + "hashed_secret": "1739fe5e5dfcf851b64f8b7b11538f1de29ce0b5", "is_verified": false, - "line_number": 450, + "line_number": 452, "type": "Base64 High Entropy String" }, { - "hashed_secret": "d38cf89b25bd7378cdb4e00b4b59293001dd500b", - "is_secret": false, + "hashed_secret": "8129db302110714fc735e3494bd82a65690e0963", "is_verified": false, - "line_number": 451, + "line_number": 453, "type": "Base64 High Entropy String" }, { - "hashed_secret": "1648f34ce2f1b563a8ed1c6d5d55b5e76a395903", - "is_secret": false, + "hashed_secret": "b48bfc62091164086a703115a0e68bdb09212591", "is_verified": false, - "line_number": 452, + "line_number": 454, "type": "Base64 High Entropy String" }, { - "hashed_secret": "9bf63f6f49fb01ff80959bc5a60c8688df92cc02", - "is_secret": false, + "hashed_secret": "a10284feaf27f84081073a3267e3dce24ca7b911", "is_verified": false, - "line_number": 453, + "line_number": 455, + "type": "Base64 High Entropy String" + }, + { + 
"hashed_secret": "3fd80f31de4be8dde9d2b421e832c7d4043fd49a", + "is_verified": false, + "line_number": 456, "type": "Base64 High Entropy String" } ], "kube/services/jobs/indexd-authz-job.yaml": [ { "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", - "is_secret": false, "is_verified": false, "line_number": 87, "type": "Basic Auth Credentials" @@ -1191,14 +1091,12 @@ "kube/services/monitoring/grafana-values.yaml": [ { "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", - "is_secret": false, "is_verified": false, "line_number": 162, "type": "Secret Keyword" }, { "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", - "is_secret": false, "is_verified": false, "line_number": 166, "type": "Secret Keyword" @@ -1207,7 +1105,6 @@ "kube/services/revproxy/helpers.js": [ { "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", - "is_secret": false, "is_verified": false, "line_number": 10, "type": "Base64 High Entropy String" @@ -1216,7 +1113,6 @@ "kube/services/revproxy/helpersTest.js": [ { "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", - "is_secret": false, "is_verified": false, "line_number": 22, "type": "JSON Web Token" @@ -1225,7 +1121,6 @@ "kube/services/superset/superset-deploy.yaml": [ { "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", - "is_secret": false, "is_verified": false, "line_number": 38, "type": "Secret Keyword" @@ -1234,14 +1129,12 @@ "kube/services/superset/superset-redis.yaml": [ { "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", - "is_secret": false, "is_verified": false, "line_number": 165, "type": "Secret Keyword" }, { "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", - "is_secret": false, "is_verified": false, "line_number": 265, "type": "Secret Keyword" @@ -1250,35 +1143,30 @@ "kube/services/superset/values.yaml": [ { "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3", - "is_secret": false, "is_verified": false, "line_number": 54, "type": "Secret Keyword" }, { 
"hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46", - "is_secret": false, "is_verified": false, "line_number": 86, "type": "Secret Keyword" }, { "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8", - "is_secret": false, "is_verified": false, "line_number": 212, "type": "Secret Keyword" }, { "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358", - "is_secret": false, "is_verified": false, "line_number": 396, "type": "Secret Keyword" }, { "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", - "is_secret": false, "is_verified": false, "line_number": 503, "type": "Secret Keyword" @@ -1287,280 +1175,240 @@ "package-lock.json": [ { "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", - "is_secret": false, "is_verified": false, "line_number": 1481, "type": "Base64 High Entropy String" }, { "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", - "is_secret": false, "is_verified": false, "line_number": 1489, "type": "Base64 High Entropy String" }, { "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", - "is_secret": false, "is_verified": false, "line_number": 1521, "type": "Base64 High Entropy String" }, { "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", - "is_secret": false, "is_verified": false, "line_number": 1547, "type": "Base64 High Entropy String" }, { "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", - "is_secret": false, "is_verified": false, "line_number": 1611, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", - "is_secret": false, "is_verified": false, "line_number": 1640, "type": "Base64 High Entropy String" }, { "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", - "is_secret": false, "is_verified": false, "line_number": 1648, "type": "Base64 High Entropy String" }, { "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", - "is_secret": false, "is_verified": false, "line_number": 1664, "type": "Base64 High Entropy 
String" }, { "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", - "is_secret": false, "is_verified": false, "line_number": 1683, "type": "Base64 High Entropy String" }, { "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", - "is_secret": false, "is_verified": false, "line_number": 1733, "type": "Base64 High Entropy String" }, { "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", - "is_secret": false, "is_verified": false, "line_number": 1742, "type": "Base64 High Entropy String" }, { "hashed_secret": "a7af5768a6d936e36f28e1030d7f894d7aaf555e", - "is_secret": false, "is_verified": false, "line_number": 1755, "type": "Base64 High Entropy String" }, { "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", - "is_secret": false, "is_verified": false, "line_number": 1769, "type": "Base64 High Entropy String" }, { "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", - "is_secret": false, "is_verified": false, "line_number": 1855, "type": "Base64 High Entropy String" }, { "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", - "is_secret": false, "is_verified": false, "line_number": 1889, "type": "Base64 High Entropy String" }, { "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", - "is_secret": false, "is_verified": false, "line_number": 1894, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", - "is_secret": false, "is_verified": false, "line_number": 1921, "type": "Base64 High Entropy String" }, { "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", - "is_secret": false, "is_verified": false, "line_number": 1950, "type": "Base64 High Entropy String" }, { "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", - "is_secret": false, "is_verified": false, "line_number": 1983, "type": "Base64 High Entropy String" }, { "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", - "is_secret": false, "is_verified": false, "line_number": 2004, "type": 
"Base64 High Entropy String" }, { "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", - "is_secret": false, "is_verified": false, "line_number": 2013, "type": "Base64 High Entropy String" }, { "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", - "is_secret": false, "is_verified": false, "line_number": 2046, "type": "Base64 High Entropy String" }, { "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", - "is_secret": false, "is_verified": false, "line_number": 2078, "type": "Base64 High Entropy String" }, { "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", - "is_secret": false, "is_verified": false, "line_number": 2083, "type": "Base64 High Entropy String" }, { "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", - "is_secret": false, "is_verified": false, "line_number": 2111, "type": "Base64 High Entropy String" }, { "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", - "is_secret": false, "is_verified": false, "line_number": 2138, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", - "is_secret": false, "is_verified": false, "line_number": 2143, "type": "Base64 High Entropy String" }, { "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", - "is_secret": false, "is_verified": false, "line_number": 2150, "type": "Base64 High Entropy String" }, { "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", - "is_secret": false, "is_verified": false, "line_number": 2188, "type": "Base64 High Entropy String" }, { "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", - "is_secret": false, "is_verified": false, "line_number": 2291, "type": "Base64 High Entropy String" }, { "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", - "is_secret": false, "is_verified": false, "line_number": 2303, "type": "Base64 High Entropy String" }, { "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", - "is_secret": false, "is_verified": false, 
"line_number": 2308, "type": "Base64 High Entropy String" }, { "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", - "is_secret": false, "is_verified": false, "line_number": 2317, "type": "Base64 High Entropy String" }, { "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", - "is_secret": false, "is_verified": false, "line_number": 2327, "type": "Base64 High Entropy String" }, { "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", - "is_secret": false, "is_verified": false, "line_number": 2332, "type": "Base64 High Entropy String" }, { "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", - "is_secret": false, "is_verified": false, "line_number": 2340, "type": "Base64 High Entropy String" }, { "hashed_secret": "eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", - "is_secret": false, "is_verified": false, "line_number": 2349, "type": "Base64 High Entropy String" }, { "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", - "is_secret": false, "is_verified": false, "line_number": 2357, "type": "Base64 High Entropy String" }, { "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", - "is_secret": false, "is_verified": false, "line_number": 2369, "type": "Base64 High Entropy String" }, { "hashed_secret": "3f0c251b9c2c21454445a98fde6915ceacde2136", - "is_secret": false, "is_verified": false, "line_number": 2387, "type": "Base64 High Entropy String" @@ -1569,7 +1417,6 @@ "tf_files/aws/cognito/README.md": [ { "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70", - "is_secret": false, "is_verified": false, "line_number": 106, "type": "Secret Keyword" @@ -1578,14 +1425,12 @@ "tf_files/aws/commons/README.md": [ { "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", - "is_secret": false, "is_verified": false, "line_number": 60, "type": "Secret Keyword" }, { "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", - "is_secret": false, "is_verified": false, "line_number": 78, "type": "Secret Keyword" @@ -1594,7 +1439,6 @@ 
"tf_files/aws/eks/sample.tfvars": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 107, "type": "Hex High Entropy String" @@ -1603,7 +1447,6 @@ "tf_files/aws/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 133, "type": "Hex High Entropy String" @@ -1612,14 +1455,12 @@ "tf_files/aws/modules/common-logging/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, "line_number": 57, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, "line_number": 59, "type": "Hex High Entropy String" @@ -1628,28 +1469,24 @@ "tf_files/aws/modules/common-logging/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1658,21 +1495,18 @@ "tf_files/aws/modules/common-logging/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": 
"a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" @@ -1681,7 +1515,6 @@ "tf_files/aws/modules/eks/variables.tf": [ { "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", - "is_secret": false, "is_verified": false, "line_number": 113, "type": "Hex High Entropy String" @@ -1690,14 +1523,12 @@ "tf_files/aws/modules/management-logs/README.md": [ { "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", - "is_secret": false, "is_verified": false, "line_number": 54, "type": "Base64 High Entropy String" }, { "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_secret": false, "is_verified": false, "line_number": 56, "type": "Hex High Entropy String" @@ -1706,28 +1537,24 @@ "tf_files/aws/modules/management-logs/lambda_function.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, "is_verified": false, "line_number": 18, "type": "Hex High Entropy String" }, { "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", - "is_secret": false, "is_verified": false, "line_number": 30, "type": "Hex High Entropy String" @@ -1736,42 +1563,36 @@ "tf_files/aws/modules/management-logs/testLambda.py": [ { "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", - "is_secret": false, "is_verified": false, "line_number": 5, "type": "Base64 High Entropy String" }, { "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", - "is_secret": false, 
"is_verified": false, "line_number": 5, "type": "Hex High Entropy String" }, { "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Base64 High Entropy String" }, { "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" }, { "hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", - "is_secret": false, "is_verified": false, "line_number": 6, "type": "Hex High Entropy String" @@ -1780,7 +1601,6 @@ "tf_files/aws/slurm/README.md": [ { "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", - "is_secret": false, "is_verified": false, "line_number": 83, "type": "Secret Keyword" @@ -1789,7 +1609,6 @@ "tf_files/azure/cloud.tf": [ { "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", - "is_secret": false, "is_verified": false, "line_number": 424, "type": "Secret Keyword" @@ -1798,7 +1617,6 @@ "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -1807,7 +1625,6 @@ "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 231, "type": "Secret Keyword" @@ -1816,7 +1633,6 @@ "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", - "is_secret": false, "is_verified": false, "line_number": 262, "type": "Secret Keyword" @@ -1825,21 +1641,18 @@ "tf_files/gcp/commons/sample.tfvars": [ { "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", - "is_secret": false, "is_verified": false, "line_number": 11, "type": "Secret Keyword" }, { "hashed_secret": 
"8db3b325254b6389ca194d829d2fc923dc0a945d", - "is_secret": false, "is_verified": false, "line_number": 26, "type": "Secret Keyword" }, { "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", - "is_secret": false, "is_verified": false, "line_number": 37, "type": "Secret Keyword" @@ -1848,7 +1661,6 @@ "tf_files/shared/modules/k8s_configs/creds.tpl": [ { "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", - "is_secret": false, "is_verified": false, "line_number": 8, "type": "Secret Keyword" diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 71575e3c5..c54f9d5aa 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -1,4 +1,5 @@ import argparse +import copy import json import sys import requests @@ -40,6 +41,16 @@ "study_metadata.human_subject_applicability.gender_applicability": "Not applicable" } +# repository links +REPOSITORY_STUDY_ID_LINK_TEMPLATE = { + "NIDDK Central": "https://repository.niddk.nih.gov/studies//", + "NIDA Data Share": "https://datashare.nida.nih.gov/study/", + "NICHD DASH": "https://dash.nichd.nih.gov/study/", + "ICPSR": "https://www.icpsr.umich.edu/web/ICPSR/studies/", + "BioSystics-AP": "https://biosystics-ap.com/assays/assaystudy//", +} + + def is_valid_uuid(uuid_to_test, version=4): """ Check if uuid_to_test is a valid UUID. 
@@ -114,6 +125,31 @@ def get_client_token(client_id: str, client_secret: str): return token +def get_related_studies(serial_num, hostname): + related_study_result = [] + + if serial_num: + mds = requests.get(f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000") + if mds.status_code == 200: + related_study_metadata = mds.json() + + for ( + related_study_metadata_key, + related_study_metadata_value, + ) in related_study_metadata.items(): + title = ( + related_study_metadata_value.get( + "gen3_discovery", {} + ) + .get("study_metadata", {}) + .get("minimal_info", {}) + .get("study_name", "") + ) + link = f"https://{hostname}/portal/discovery/{related_study_metadata_key}/" + related_study_result.append({"title": title, "link": link}) + return related_study_result + + parser = argparse.ArgumentParser() parser.add_argument("--directory", help="CEDAR Directory ID for registering ") @@ -214,6 +250,71 @@ def get_client_token(client_id: str, client_secret: str): mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) mds_res["gen3_discovery"]["study_metadata"]["metadata_location"]["other_study_websites"] = cedar_record_other_study_websites + # setup citations + doi_citation = mds_res["gen3_discovery"]["study_metadata"].get("doi_citation", "") + mds_res["gen3_discovery"]["study_metadata"]["citation"]["heal_platform_citation"] = doi_citation + + + # setup repository_study_link + data_repositories = ( + mds_res + .get("gen3_discovery", {}) + .get("study_metadata", {}) + .get("metadata_location", {}) + .get("data_repositories", []) + ) + repository_citation = "Users must also include a citation to the data as specified by the local repository." + repository_citation_additional_text = ' The link to the study page at the local repository can be found in the "Data" tab.' 
+ for repository in data_repositories: + if ( + repository["repository_name"] + and repository["repository_name"] + in REPOSITORY_STUDY_ID_LINK_TEMPLATE + and repository["repository_study_ID"] + ): + repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[ + repository["repository_name"] + ].replace("", repository["repository_study_ID"]) + repository.update({"repository_study_link": repository_study_link}) + if repository_citation_additional_text not in repository_citation: + repository_citation += repository_citation_additional_text + if len(data_repositories): + data_repositories[0] = { + **data_repositories[0], + "repository_citation": repository_citation, + } + + mds_res["gen3_discovery"]["study_metadata"][ + "metadata_location" + ]["data_repositories"] = copy.deepcopy(data_repositories) + + + + # set up related studies + serial_num = None + try: + serial_num = ( + mds_res + .get("nih_reporter", {}) + .get("project_num_split", {}) + .get("serial_num", None) + ) + except Exception: + print(f"Unable to get serial number for study") + + if serial_num == None: + print(f"Unable to get serial number for study") + + related_study_result = get_related_studies(serial_num, hostname) + existing_related_study_result = mds_res.get("related_studies", []) + for related_study in related_study_result: + if related_study not in existing_related_study_result: + existing_related_study_result.append(copy.deepcopy(related_study)) + mds_res["gen3_discovery"][ + "related_studies" + ] = copy.deepcopy(existing_related_study_result) + + # merge data from cedar that is not study level metadata into a level higher deleted_keys = [] for key, value in mds_res["gen3_discovery"]["study_metadata"].items(): From 99fc77ac5ee4a36443a9b802d903e6ffaab6f8c7 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Thu, 7 Mar 2024 17:32:40 -0600 Subject: [PATCH 307/362] MIDRC-602 Add ecr-access job (#2480) --- .pre-commit-config.yaml | 2 +- 
.secrets.baseline | 3792 +++++++++++++---- files/scripts/ecr-access-job-requirements.txt | 1 + files/scripts/ecr-access-job.md | 85 + files/scripts/ecr-access-job.py | 177 + gen3/bin/kube-setup-ecr-access-cronjob.sh | 61 + kube/services/jobs/ecr-access-job.yaml | 85 + 7 files changed, 3339 insertions(+), 864 deletions(-) create mode 100644 files/scripts/ecr-access-job-requirements.txt create mode 100644 files/scripts/ecr-access-job.md create mode 100644 files/scripts/ecr-access-job.py create mode 100644 gen3/bin/kube-setup-ecr-access-cronjob.sh create mode 100644 kube/services/jobs/ecr-access-job.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2e3ce795b..82034495d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: git@github.com:Yelp/detect-secrets - rev: v0.13.1 + rev: v1.4.0 hooks: - id: detect-secrets args: ['--baseline', '.secrets.baseline'] diff --git a/.secrets.baseline b/.secrets.baseline index 200b69841..0c4eba0a8 100644 --- a/.secrets.baseline +++ b/.secrets.baseline @@ -1,19 +1,18 @@ { - "exclude": { - "files": null, - "lines": null - }, - "generated_at": "2024-03-04T21:42:56Z", + "version": "1.4.0", "plugins_used": [ + { + "name": "ArtifactoryDetector" + }, { "name": "AWSKeyDetector" }, { - "name": "ArtifactoryDetector" + "name": "AzureStorageKeyDetector" }, { - "base64_limit": 4.5, - "name": "Base64HighEntropyString" + "name": "Base64HighEntropyString", + "limit": 4.5 }, { "name": "BasicAuthDetector" @@ -22,8 +21,14 @@ "name": "CloudantDetector" }, { - "hex_limit": 3, - "name": "HexHighEntropyString" + "name": "DiscordBotTokenDetector" + }, + { + "name": "GitHubTokenDetector" + }, + { + "name": "HexHighEntropyString", + "limit": 3.0 }, { "name": "IbmCloudIamDetector" @@ -35,21 +40,30 @@ "name": "JwtTokenDetector" }, { - "keyword_exclude": null, - "name": "KeywordDetector" + "name": "KeywordDetector", + "keyword_exclude": "" }, { "name": "MailchimpDetector" }, + { + "name": 
"NpmDetector" + }, { "name": "PrivateKeyDetector" }, + { + "name": "SendGridDetector" + }, { "name": "SlackDetector" }, { "name": "SoftlayerDetector" }, + { + "name": "SquareOAuthDetector" + }, { "name": "StripeDetector" }, @@ -57,1619 +71,3671 @@ "name": "TwilioKeyDetector" } ], + "filters_used": [ + { + "path": "detect_secrets.filters.allowlist.is_line_allowlisted" + }, + { + "path": "detect_secrets.filters.common.is_baseline_file", + "filename": ".secrets.baseline" + }, + { + "path": "detect_secrets.filters.common.is_ignored_due_to_verification_policies", + "min_level": 2 + }, + { + "path": "detect_secrets.filters.heuristic.is_indirect_reference" + }, + { + "path": "detect_secrets.filters.heuristic.is_likely_id_string" + }, + { + "path": "detect_secrets.filters.heuristic.is_lock_file" + }, + { + "path": "detect_secrets.filters.heuristic.is_not_alphanumeric_string" + }, + { + "path": "detect_secrets.filters.heuristic.is_potential_uuid" + }, + { + "path": "detect_secrets.filters.heuristic.is_prefixed_with_dollar_sign" + }, + { + "path": "detect_secrets.filters.heuristic.is_sequential_string" + }, + { + "path": "detect_secrets.filters.heuristic.is_swagger_file" + }, + { + "path": "detect_secrets.filters.heuristic.is_templated_secret" + } + ], "results": { "Chef/repo/data_bags/README.md": [ { - "hashed_secret": "8a9250639e092d90f164792e35073a9395bff366", - "is_verified": false, - "line_number": 45, - "type": "Secret Keyword" - }, - { + "type": "Secret Keyword", + "filename": "Chef/repo/data_bags/README.md", "hashed_secret": "6367c48dd193d56ea7b0baad25b19455e529f5ee", "is_verified": false, - "line_number": 51, - "type": "Secret Keyword" + "line_number": 38 } ], - "Docker/jenkins/Jenkins-CI-Worker/Dockerfile": [ + "Docker/sidecar/service.key": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Private Key", + "filename": "Docker/sidecar/service.key", + "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", "is_verified": false, - 
"line_number": 124, - "type": "Secret Keyword" + "line_number": 1 } ], - "Docker/jenkins/Jenkins-Worker/Dockerfile": [ + "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Secret Keyword", + "filename": "Jenkins/Stacks/Jenkins/jenkins.env.sample", + "hashed_secret": "f41a52528dd2d592d2c05de5f388101c2948aa98", "is_verified": false, - "line_number": 139, - "type": "Secret Keyword" + "line_number": 5 } ], - "Docker/jenkins/Jenkins/Dockerfile": [ + "Jenkinsfile": [ { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Secret Keyword", + "filename": "Jenkinsfile", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", "is_verified": false, - "line_number": 107, - "type": "Secret Keyword" - } - ], - "Docker/jenkins/Jenkins2/Dockerfile": [ + "line_number": 144 + }, { - "hashed_secret": "10daf3a26c6a17242a5ab2438a12ebc8276c7603", + "type": "Secret Keyword", + "filename": "Jenkinsfile", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 108, - "type": "Secret Keyword" + "line_number": 147 } ], - "Docker/sidecar/service.key": [ + "ansible/roles/slurm/README.md": [ { - "hashed_secret": "1348b145fa1a555461c1b790a2f66614781091e9", + "type": "Base64 High Entropy String", + "filename": "ansible/roles/slurm/README.md", + "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", "is_verified": false, - "line_number": 1, - "type": "Private Key" + "line_number": 86 } ], - "Jenkins/Stacks/Jenkins/jenkins.env.sample": [ + "apis_configs/fence_settings.py": [ { - "hashed_secret": "eecee33686ac5861c2a7edc8b46bd0e5432bfddd", + "type": "Basic Auth Credentials", + "filename": "apis_configs/fence_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 5, - "type": "Secret Keyword" + "line_number": 80 } ], - "ansible/roles/awslogs/defaults/main.yaml": [ + 
"apis_configs/peregrine_settings.py": [ { - "hashed_secret": "9d4e1e23bd5b727046a9e3b4b7db57bd8d6ee684", + "type": "Basic Auth Credentials", + "filename": "apis_configs/peregrine_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 30, - "type": "Basic Auth Credentials" + "line_number": 46 } ], - "ansible/roles/slurm/README.md": [ - { - "hashed_secret": "4acfde1ff9c353ba2ef0dbe0df73bda2743cba42", - "is_verified": false, - "line_number": 86, - "type": "Base64 High Entropy String" - }, + "apis_configs/sheepdog_settings.py": [ { - "hashed_secret": "579649582303921502d9e6d3f8755f13fdd2b476", + "type": "Basic Auth Credentials", + "filename": "apis_configs/sheepdog_settings.py", + "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", "is_verified": false, - "line_number": 86, - "type": "Secret Keyword" + "line_number": 46 } ], - "apis_configs/config_helper.py": [ + "aws-inspec/kubernetes/chef_inspec-cron.yaml": [ { - "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", + "type": "Secret Keyword", + "filename": "aws-inspec/kubernetes/chef_inspec-cron.yaml", + "hashed_secret": "a3ba27250861948a554629a0e21168821ddfa9f1", "is_verified": false, - "line_number": 66, - "type": "Basic Auth Credentials" + "line_number": 35 } ], - "apis_configs/fence_credentials.json": [ + "doc/api.md": [ { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "type": "Hex High Entropy String", + "filename": "doc/api.md", + "hashed_secret": "625de83a7517422051911680cc803921ff99db90", "is_verified": false, - "line_number": 23, - "type": "Secret Keyword" + "line_number": 47 } ], - "apis_configs/fence_settings.py": [ + "doc/gen3OnK8s.md": [ { - "hashed_secret": "3ef0fb8a603abdc0b6caac44a23fdc6792f77ddf", + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "55c100ba37d2df35ec1e5f5d6302f060387df6cc", "is_verified": false, - "line_number": 6, - "type": "Basic Auth Credentials" + 
"line_number": 113 }, { - "hashed_secret": "b60d121b438a380c343d5ec3c2037564b82ffef3", + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "262d8e9b8ac5f06e7612dfb608f7267f88679801", "is_verified": false, - "line_number": 58, - "type": "Secret Keyword" + "line_number": 120 }, { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "1c17e556736c4d23933f99d199e7c2c572895fd2", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "76a4acaf31b815aa2c41cc2a2176b11fa9edf00a", + "is_verified": false, + "line_number": 145 + }, + { + "type": "Secret Keyword", + "filename": "doc/gen3OnK8s.md", + "hashed_secret": "9d678cbce5a343920f754d5836f03346ee01cde5", "is_verified": false, - "line_number": 80, - "type": "Basic Auth Credentials" + "line_number": 154 } ], - "apis_configs/indexd_settings.py": [ + "files/scripts/psql-fips-fix.sh": [ { - "hashed_secret": "0a0d18c85e096611b5685b62bc60ec534d19bacc", + "type": "Secret Keyword", + "filename": "files/scripts/psql-fips-fix.sh", + "hashed_secret": "2f1aa1e2a58704b452a5dd60ab1bd2b761bf296a", "is_verified": false, - "line_number": 59, - "type": "Basic Auth Credentials" + "line_number": 9 } ], - "apis_configs/peregrine_settings.py": [ + "gen3/bin/bucket-manifest.sh": [ { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "type": "Secret Keyword", + "filename": "gen3/bin/bucket-manifest.sh", + "hashed_secret": "2be88ca4242c76e8253ac62474851065032d6833", "is_verified": false, - "line_number": 46, - "type": "Basic Auth Credentials" + "line_number": 58 } ], - "apis_configs/sheepdog_settings.py": [ + "gen3/bin/bucket-replicate.sh": [ { - "hashed_secret": "347cd9c53ff77d41a7b22aa56c7b4efaf54658e3", + "type": "Secret Keyword", + "filename": "gen3/bin/bucket-replicate.sh", + "hashed_secret": 
"2be88ca4242c76e8253ac62474851065032d6833", "is_verified": false, - "line_number": 46, - "type": "Basic Auth Credentials" + "line_number": 39 } ], - "doc/Gen3-data-upload.md": [ + "gen3/bin/secrets.sh": [ { - "hashed_secret": "b8bd20d4a2701dc3aba0efbbf325f1359392d93e", + "type": "Secret Keyword", + "filename": "gen3/bin/secrets.sh", + "hashed_secret": "fb6220478aaba649aac37271a1d7c6317abc03a6", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 135 } ], - "doc/api.md": [ + "gen3/lib/aws.sh": [ { - "hashed_secret": "625de83a7517422051911680cc803921ff99db90", + "type": "Secret Keyword", + "filename": "gen3/lib/aws.sh", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 47, - "type": "Hex High Entropy String" + "line_number": 640 } ], - "doc/gen3OnK8s.md": [ + "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ { - "hashed_secret": "2db6d21d365f544f7ca3bcfb443ac96898a7a069", + "type": "Basic Auth Credentials", + "filename": "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml", + "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", "is_verified": false, - "line_number": 113, - "type": "Secret Keyword" - }, + "line_number": 33 + } + ], + "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json": [ { - "hashed_secret": "ff9ee043d85595eb255c05dfe32ece02a53efbb2", + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "0447a636536df0264b2000403fbefd69f603ceb1", "is_verified": false, - "line_number": 143, - "type": "Secret Keyword" + "line_number": 54 }, { - "hashed_secret": "70374248fd7129088fef42b8f568443f6dce3a48", + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 170, - 
"type": "Secret Keyword" + "line_number": 60 }, { - "hashed_secret": "bcf22dfc6fb76b7366b1f1675baf2332a0e6a7ce", + "type": "Secret Keyword", + "filename": "gen3/lib/bootstrap/templates/cdis-manifest/manifests/sower/sower.json", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 189, - "type": "Secret Keyword" + "line_number": 108 } ], - "doc/kube-setup-data-ingestion-job.md": [ + "gen3/lib/onprem.sh": [ { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "type": "Secret Keyword", + "filename": "gen3/lib/onprem.sh", + "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", "is_verified": false, - "line_number": 30, - "type": "Secret Keyword" - } - ], - "doc/logs.md": [ + "line_number": 68 + }, { - "hashed_secret": "9addbf544119efa4a64223b649750a510f0d463f", + "type": "Secret Keyword", + "filename": "gen3/lib/onprem.sh", + "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", "is_verified": false, - "line_number": 6, - "type": "Secret Keyword" + "line_number": 84 } ], - "doc/slurm_cluster.md": [ + "gen3/lib/testData/default/expectedFenceResult.yaml": [ { - "hashed_secret": "2ace62c1befa19e3ea37dd52be9f6d508c5163e6", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 184, - "type": "Secret Keyword" - } - ], - "files/dashboard/usage-reports/package-lock.json": [ + "line_number": 68 + }, { - "hashed_secret": "e095101882f706c4de95e0f75c5bcb9666e3f448", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" + "line_number": 71 }, { - "hashed_secret": "5422e4f96964d5739998b25ac214520c1b113e5b", + "type": "Secret Keyword", + "filename": 
"gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 15, - "type": "Base64 High Entropy String" - } - ], - "gen3/bin/api.sh": [ + "line_number": 74 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 407, - "type": "Secret Keyword" + "line_number": 84 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 477, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-dev-namespace.sh": [ + "line_number": 87 + }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 135, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-argo.sh": [ + "line_number": 90 + }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 206, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-aurora-monitoring.sh": [ + "line_number": 93 + }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", "is_verified": false, - "line_number": 59, - "type": "Secret Keyword" - } - ], - 
"gen3/bin/kube-setup-certs.sh": [ + "line_number": 96 + }, { - "hashed_secret": "2e9ee120fd25e31048598693aca91d5473898a99", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedFenceResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 50, - "type": "Secret Keyword" + "line_number": 99 } ], - "gen3/bin/kube-setup-dashboard.sh": [ + "gen3/lib/testData/default/expectedSheepdogResult.yaml": [ { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 40, - "type": "Secret Keyword" + "line_number": 60 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 41, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-data-ingestion-job.sh": [ + "line_number": 63 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 37, - "type": "Secret Keyword" + "line_number": 69 }, { - "hashed_secret": "8695a632956b1b0ea7b66993dcc98732da39148c", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/default/expectedSheepdogResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 102, - "type": "Secret Keyword" + "line_number": 72 } ], - "gen3/bin/kube-setup-dicom-server.sh": [ + "gen3/lib/testData/etlconvert/expected2.yaml": [ { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Base64 
High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", "is_verified": false, - "line_number": 43, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-dicom.sh": [ + "line_number": 10 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_verified": false, + "line_number": 13 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_verified": false, + "line_number": 16 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_verified": false, + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_verified": false, + "line_number": 33 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", + "is_verified": false, + "line_number": 35 + }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/expected2.yaml", + "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" + "line_number": 36 } ], - "gen3/bin/kube-setup-gen3-discovery-ai.sh": [ + "gen3/lib/testData/etlconvert/users2.yaml": [ + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "is_verified": false, + "line_number": 
543 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "is_verified": false, + "line_number": 553 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "is_verified": false, + "line_number": 558 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "is_verified": false, + "line_number": 568 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "is_verified": false, + "line_number": 643 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", "is_verified": false, - "line_number": 37, - "type": "Secret Keyword" + "line_number": 653 }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "type": "Base64 High Entropy String", + "filename": "gen3/lib/testData/etlconvert/users2.yaml", + "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", "is_verified": false, - "line_number": 71, - "type": "Secret Keyword" + "line_number": 658 } ], - "gen3/bin/kube-setup-jenkins.sh": [ + "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml": [ + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": 
"1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 77 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 87 + }, { - "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 18, - "type": "Secret Keyword" + "line_number": 90 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 22, - "type": "Secret Keyword" + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 99 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedFenceResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 102 } ], - "gen3/bin/kube-setup-jenkins2.sh": [ + "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml": [ + { + "type": "Secret 
Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", + "is_verified": false, + "line_number": 63 + }, { - "hashed_secret": "05ea760643a5c0a9bacb3544dc844ac79938a51f", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 18, - "type": "Secret Keyword" + "line_number": 66 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 22, - "type": "Secret Keyword" + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "gen3/lib/testData/test1.manifest.g3k/expectedSheepdogResult.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 } ], - "gen3/bin/kube-setup-metadata.sh": [ + "gen3/test/secretsTest.sh": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "gen3/test/secretsTest.sh", + "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", "is_verified": false, - "line_number": 35, - "type": "Secret Keyword" + "line_number": 25 } ], - "gen3/bin/kube-setup-revproxy.sh": [ + "gen3/test/terraformTest.sh": [ { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 38, - "type": "Secret Keyword" + "line_number": 156 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": 
"d869db7fe62fb07c25a0403ecaea55031744b5fb", "is_verified": false, - "line_number": 55, - "type": "Secret Keyword" + "line_number": 163 }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Base64 High Entropy String", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", "is_verified": false, - "line_number": 57, - "type": "Secret Keyword" - } - ], - "gen3/bin/kube-setup-secrets.sh": [ + "line_number": 172 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "is_verified": false, + "line_number": 172 + }, + { + "type": "Base64 High Entropy String", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_verified": false, + "line_number": 175 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "is_verified": false, + "line_number": 175 + }, + { + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "212e1d3823c8c9af9e4c0c172164ee292b9a6768", + "is_verified": false, + "line_number": 311 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "cb80dbb67a1a5bdf4957eea1473789f1c65357c6", "is_verified": false, - "line_number": 79, - "type": "Secret Keyword" + "line_number": 312 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + "hashed_secret": "5f35c25f4bf588b5fad46e249fcd9221f5257ce4", "is_verified": false, - "line_number": 82, - "type": "Secret Keyword" + "line_number": 313 }, { - "hashed_secret": "6f7531b95bbc99ac25a5cc82edb825f319c5dee8", + "type": "Secret Keyword", + "filename": "gen3/test/terraformTest.sh", + 
"hashed_secret": "5308421b43dde5775f1993bd25a8163070d65598", "is_verified": false, - "line_number": 95, - "type": "Secret Keyword" + "line_number": 314 } ], - "gen3/bin/kube-setup-sftp.sh": [ + "kube/services/access-backend/access-backend-deploy.yaml": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "kube/services/access-backend/access-backend-deploy.yaml", + "hashed_secret": "dbf88a0c3d905c669c0fd13bf8172bb34d4b1168", "is_verified": false, - "line_number": 36, - "type": "Secret Keyword" - }, + "line_number": 60 + } + ], + "kube/services/acronymbot/acronymbot-deploy.yaml": [ { - "hashed_secret": "83d11e3aec005a3b9a2077c6800683e202a95af4", + "type": "Secret Keyword", + "filename": "kube/services/acronymbot/acronymbot-deploy.yaml", + "hashed_secret": "600833390a6b9891d0d8a5f6e3326abb237ac8ca", "is_verified": false, - "line_number": 51, - "type": "Secret Keyword" + "line_number": 49 } ], - "gen3/bin/kube-setup-sheepdog.sh": [ + "kube/services/arborist/arborist-deploy-2.yaml": [ { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy-2.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", "is_verified": false, - "line_number": 33, - "type": "Secret Keyword" + "line_number": 59 } ], - "gen3/bin/kube-setup-sower-jobs.sh": [ + "kube/services/arborist/arborist-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 64 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": 
"694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 70 + }, { - "hashed_secret": "40304f287a52d99fdbe086ad19dbdbf9cc1b3897", + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 25, - "type": "Secret Keyword" + "line_number": 77 }, { - "hashed_secret": "e7064f0b80f61dbc65915311032d27baa569ae2a", + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 80 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 120, - "type": "Secret Keyword" + "line_number": 83 }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "kube/services/arborist/arborist-deploy.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 122, - "type": "Secret Keyword" + "line_number": 86 } ], - "gen3/bin/kube-setup-ssjdispatcher.sh": [ + "kube/services/argo/workflows/fence-usersync-wf.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 108 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 117, - "type": "Secret Keyword" + "line_number": 111 }, { - "hashed_secret": 
"7992309146efaa8da936e34b0bd33242cd0e9f93", + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 184, - "type": "Secret Keyword" + "line_number": 114 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 197, - "type": "Secret Keyword" + "line_number": 117 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/argo/workflows/fence-usersync-wf.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", + "is_verified": false, + "line_number": 120 } ], - "gen3/lib/aws.sh": [ + "kube/services/argocd/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/argocd/values.yaml", + "hashed_secret": "bfc1b86ce643b65bd540989213254b01fd6ad418", + "is_verified": false, + "line_number": 1489 + } + ], + "kube/services/arranger/arranger-deploy.yaml": [ { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "type": "Secret Keyword", + "filename": "kube/services/arranger/arranger-deploy.yaml", + "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", "is_verified": false, - "line_number": 640, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "5b4b6c62d3d99d202f095c38c664eded8f640ce8", + "type": "Secret Keyword", + "filename": "kube/services/arranger/arranger-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 660, - "type": "Secret Keyword" + "line_number": 64 } ], - "gen3/lib/bootstrap/templates/Gen3Secrets/apis_configs/fence-config.yaml": [ + "kube/services/audit-service/audit-service-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": 
"kube/services/audit-service/audit-service-deploy.yaml", + "hashed_secret": "42cde1c58c36d8bb5804a076e55ac6ec07ef99fc", + "is_verified": false, + "line_number": 64 + } + ], + "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml", + "hashed_secret": "7f834ccb442433fc12ec9532f75c3a4b6a748d4c", + "is_verified": false, + "line_number": 46 + } + ], + "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 56 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/cedar-wrapper/cedar-wrapper-deploy.yaml", + "hashed_secret": "5949b79e0c7082dc78d543cde662871a4f8b8913", + "is_verified": false, + "line_number": 59 + } + ], + "kube/services/cogwheel/cogwheel-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cogwheel/cogwheel-deploy.yaml", + "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab", + "is_verified": false, + "line_number": 35 + } + ], + "kube/services/cohort-middleware/cohort-middleware-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/cohort-middleware/cohort-middleware-deploy.yaml", + "hashed_secret": "bf22f6c4bd03572f1ef593efc3eb1a7e0b6dcab4", + "is_verified": false, + "line_number": 62 + } + ], + "kube/services/dashboard/dashboard-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/dashboard/dashboard-deploy.yaml", + "hashed_secret": "9e722d12ce045c8718ab803ed465b2fbe199f3d3", + "is_verified": false, + "line_number": 61 + } + ], + "kube/services/datadog/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/datadog/values.yaml", + "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", + "is_verified": false, + "line_number": 23 + } 
+ ], + "kube/services/datasim/datasim-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/datasim/datasim-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 79 + } + ], + "kube/services/dicom-server/dicom-server-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/dicom-server/dicom-server-deploy.yaml", + "hashed_secret": "706168ac2565a93cceffe2202ac45d3d31c075fb", + "is_verified": false, + "line_number": 40 + } + ], + "kube/services/fence/fence-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + 
"line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 99 + } + ], + "kube/services/fence/fence-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 71 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 74 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 77 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + 
"hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 93 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 96 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 99 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fence/fence-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 102 + } + ], + "kube/services/fenceshib/fenceshib-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 62 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 65 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": 
false, + "line_number": 78 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 81 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 87 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 90 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 93 + } + ], + "kube/services/fenceshib/fenceshib-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 69 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 75 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 85 + }, + { + 
"type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 88 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 91 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 94 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 97 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 100 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/fenceshib/fenceshib-deploy.yaml", + "hashed_secret": "6c4789c3be186fd5dcbf06723462ccdd2c86dc37", + "is_verified": false, + "line_number": 103 + } + ], + "kube/services/frontend-framework/frontend-framework-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", + "is_verified": false, + "line_number": 60 + }, + { + 
"type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/frontend-framework/frontend-framework-root-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "6607b403f74e62246fc6a3c938feffc5a34a7e49", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "4b0bb3e58651fe56ee23e59aa6a3cb96dc61ddd2", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/frontend-framework/frontend-framework-root-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/gdcapi/gdcapi-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "e8c2f0bacaffbf2f9897217c6770413879945296", + "is_verified": false, + "line_number": 38 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "517cded9f3e3ab79237fde330b97a93f5a943316", + "is_verified": false, + "line_number": 41 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/gdcapi/gdcapi-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 44 + } + ], + "kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": 
"kube/services/gen3-discovery-ai/gen3-discovery-ai-deploy.yaml", + "hashed_secret": "38ded89f83435a558169dedb91a38f72d6cebf41", + "is_verified": false, + "line_number": 27 + } + ], + "kube/services/google-sa-validation/google-sa-validation-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 73 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": 
"0849046cdafcdb17f5a4bf5c528430d5e04ad295", + "is_verified": false, + "line_number": 79 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/google-sa-validation/google-sa-validation-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 82 + } + ], + "kube/services/guppy/guppy-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/guppy/guppy-deploy.yaml", + "hashed_secret": "0db22b31c9add2d3c76743c0ac6fbc99bb8b4761", + "is_verified": false, + "line_number": 65 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/guppy/guppy-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 68 + } + ], + "kube/services/indexd/indexd-canary-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", + "is_verified": false, + "line_number": 59 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", + "is_verified": false, + "line_number": 62 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", + "is_verified": false, + "line_number": 68 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 71 + } + ], + "kube/services/indexd/indexd-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", 
+ "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", + "is_verified": false, + "line_number": 66 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "bdecca54d39013d43d3b7f05f2927eaa7df375dc", + "is_verified": false, + "line_number": 72 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/indexd/indexd-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 + } + ], + "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-ci-worker/jenkins-ci-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 146 + } + ], + "kube/services/jenkins-worker/jenkins-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 150 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins-worker/jenkins-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 153 + } + ], + "kube/services/jenkins/jenkins-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins/jenkins-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 157 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins/jenkins-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 160 + } + 
], + "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 143 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-ci-worker/jenkins2-ci-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 146 + } + ], + "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 146 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2-worker/jenkins2-worker-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 149 + } + ], + "kube/services/jenkins2/jenkins2-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", + "hashed_secret": "c937b6fbb346a51ef679dd02ac5c4863e02bfdbf", + "is_verified": false, + "line_number": 153 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jenkins2/jenkins2-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 156 + } + ], + "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arborist-rm-expired-access-cronjob.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 37 + } + ], + "kube/services/jobs/arborist-rm-expired-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arborist-rm-expired-access-job.yaml", + "hashed_secret": 
"6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 37 + } + ], + "kube/services/jobs/arboristdb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/arboristdb-create-job.yaml", + "hashed_secret": "6c57cdfdaaf3cde7a1da6aa94c7d8e46502c4bab", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/aws-bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", + "is_verified": false, + "line_number": 33 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/aws-bucket-replicate-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/bucket-manifest-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-manifest-job.yaml", + "hashed_secret": "6c36710fe8825b381388d7005f2c9b5c70175fba", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-replicate-job.yaml", + "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483", + "is_verified": false, + "line_number": 46 + } + ], + "kube/services/jobs/bucket-replication-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-replication-job.yaml", + "hashed_secret": "84954f7729144580d612cbb0517aeca8880e3483", + "is_verified": false, + "line_number": 32 + } + ], + "kube/services/jobs/bucket-size-report-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/bucket-size-report-job.yaml", + 
"hashed_secret": "7cccf62cb63863d9d3baabed4f576eb0f7039735", + "is_verified": false, + "line_number": 34 + } + ], + "kube/services/jobs/cedar-ingestion-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/cedar-ingestion-job.yaml", + "hashed_secret": "e1c426d126dcc618dcd0686fc718d509ca6ee3b8", + "is_verified": false, + "line_number": 54 + } + ], + "kube/services/jobs/client-modify-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 41 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 44 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 50 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/client-modify-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 60 + } + ], + "kube/services/jobs/cogwheel-register-client-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/cogwheel-register-client-job.yaml", + "hashed_secret": "09b772df628fd10bca646b6a877eb661122210ab", + "is_verified": false, + "line_number": 40 + } + ], + "kube/services/jobs/config-fence-job.yaml": [ + { + "type": "Secret 
Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 44 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/config-fence-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 57 + } + ], + "kube/services/jobs/covid19-etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/covid19-etl-job.yaml", + "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212", + "is_verified": false, + "line_number": 34 + } + ], + "kube/services/jobs/covid19-notebook-etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/covid19-notebook-etl-job.yaml", + "hashed_secret": "a7a2b42615b2b256a7c601c77c426e5d6cafb212", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/data-ingestion-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "81e4388059839f71aed21999aa51095c7e545094", + "is_verified": false, + "line_number": 34 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 48 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 51 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 54 + }, + { + "type": "Secret 
Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 60 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 63 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/data-ingestion-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 66 + } + ], + "kube/services/jobs/etl-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/etl-cronjob.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 38 + } + ], + "kube/services/jobs/etl-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/etl-job.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 35 + } + ], + "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 43 + } + ], + "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-cleanup-expired-ga4gh-info-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + } + ], + "kube/services/jobs/fence-db-migrate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-db-migrate-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/fence-delete-expired-clients-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-delete-expired-clients-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 38 + } + ], + "kube/services/jobs/fence-visa-update-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 42 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 45 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 48 + } + ], + "kube/services/jobs/fence-visa-update-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fence-visa-update-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, 
+ "line_number": 42 + } + ], + "kube/services/jobs/fencedb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/fencedb-create-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/gdcdb-create-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gdcdb-create-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", + "is_verified": false, + "line_number": 33 + } + ], + "kube/services/jobs/gen3qa-check-bucket-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 177 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 180 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 186 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 190 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gen3qa-check-bucket-access-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 193 + } + ], + "kube/services/jobs/gentestdata-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 67 + }, + { + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 76 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 80 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/gentestdata-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", + "is_verified": false, + "line_number": 83 + } + ], + "kube/services/jobs/google-bucket-manifest-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-manifest-job.yaml", + "hashed_secret": "5ca8fff7767e5dd6ebed80e2c8eab66d6f3bf5eb", + "is_verified": false, + "line_number": 31 + } + ], + "kube/services/jobs/google-bucket-replicate-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", + "is_verified": false, + "line_number": 35 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", + "is_verified": false, + "line_number": 38 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-bucket-replicate-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", + "is_verified": false, + "line_number": 41 + } + ], + "kube/services/jobs/google-create-bucket-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 78 
+ }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 81 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 84 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 91 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 94 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-create-bucket-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 97 + } + ], + "kube/services/jobs/google-delete-expired-access-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 43 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 46 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 49 + } + ], + "kube/services/jobs/google-delete-expired-access-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": 
"4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 36 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 39 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-access-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 42 + } + ], + "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 48 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 51 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 57 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 61 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 64 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", + "is_verified": false, + "line_number": 67 + } + ], 
+ "kube/services/jobs/google-delete-expired-service-account-job.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", + "is_verified": false, + "line_number": 40 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", + "is_verified": false, + "line_number": 43 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", + "is_verified": false, + "line_number": 49 + }, { - "hashed_secret": "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 33, - "type": "Basic Auth Credentials" + "line_number": 53 }, { - "hashed_secret": "5d07e1b80e448a213b392049888111e1779a52db", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 286, - "type": "Secret Keyword" - } - ], - "gen3/lib/bootstrap/templates/Gen3Secrets/creds.json": [ + "line_number": 56 + }, { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-delete-expired-service-account-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 59 } ], - "gen3/lib/bootstrap/templates/Gen3Secrets/g3auto/dbfarm/servers.json": [ + "kube/services/jobs/google-init-proxy-groups-cronjob.yaml": [ { - "hashed_secret": 
"1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 5, - "type": "Secret Keyword" - } - ], - "gen3/lib/logs/utils.sh": [ + "line_number": 48 + }, { - "hashed_secret": "76143b4ffc8aa2a53f9700ce229f904e69f1e8b5", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 3, - "type": "Secret Keyword" - } - ], - "gen3/lib/manifestDefaults/hatchery/hatchery.json": [ + "line_number": 51 + }, { - "hashed_secret": "0da0e0005ca04acb407af2681d0bede6d9406039", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" - } - ], - "gen3/lib/onprem.sh": [ + "line_number": 54 + }, { - "hashed_secret": "29e52a9bac8f274fa41c51fce9c98eba0dd99cb3", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 68, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "50f013532a9770a2c2cfdc38b7581dd01df69b70", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 84, - "type": "Secret Keyword" - } - ], - "gen3/lib/secrets/rotate-postgres.sh": [ + "line_number": 64 + }, { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": 
"2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 162, - "type": "Secret Keyword" + "line_number": 67 }, { - "hashed_secret": "d3df8a3b08a9de43b73eca1302d50e7a0e5b360f", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-cronjob.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 250, - "type": "Secret Keyword" + "line_number": 70 } ], - "gen3/lib/testData/etlconvert/expected2.yaml": [ + "kube/services/jobs/google-init-proxy-groups-job.yaml": [ { - "hashed_secret": "fe54e5e937d642307ec155b47ac8a214cb40d474", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 10, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "cea0e701e53c42bede2212b22f58f9ff8324da55", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 13, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "d98d72830f08c9a8b96ed11d3d96ae9e71b72a26", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 16, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "667fd45d415f73f4132cf0ed11452beb51117b12", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "c2599d515ba3be74ed58821485ba769fc565e424", + "type": "Secret Keyword", + 
"filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 33, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "6ec5eb29e2884f0c9731493b38902e37c2d672ba", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 35, - "type": "Base64 High Entropy String" + "line_number": 59 }, { - "hashed_secret": "99126b74731670a59b663d5320712564ec7b5f22", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-init-proxy-groups-job.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 36, - "type": "Base64 High Entropy String" + "line_number": 62 } ], - "gen3/test/secretsTest.sh": [ + "kube/services/jobs/google-manage-account-access-cronjob.yaml": [ { - "hashed_secret": "c2c715092ef59cba22520f109f041efca84b8938", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 25, - "type": "Secret Keyword" - } - ], - "gen3/test/terraformTest.sh": [ + "line_number": 48 + }, { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 156, - "type": "Secret Keyword" + "line_number": 51 }, { - "hashed_secret": "1cc07dccfdf640eb0e403e490a873a5536759009", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 172, - "type": "Base64 
High Entropy String" + "line_number": 54 }, { - "hashed_secret": "185a71a740ef6b9b21c84e6eaa47b89c7de181ef", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 61 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 175, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "329b7cd8191942bedd337107934d365c43a86e6c", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 175, - "type": "Secret Keyword" + "line_number": 67 } ], - "kube/services/argocd/values.yaml": [ + "kube/services/jobs/google-manage-account-access-job.yaml": [ { - "hashed_secret": "27c6929aef41ae2bcadac15ca6abcaff72cda9cd", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 360, - "type": "Private Key" + "line_number": 40 }, { - "hashed_secret": "edbd5e119f94badb9f99a67ac6ff4c7a5204ad61", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 379, - "type": "Secret Keyword" + "line_number": 43 }, { - "hashed_secret": "91dfd9ddb4198affc5c194cd8ce6d338fde470e2", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 412, - "type": "Secret Keyword" - } - ], - 
"kube/services/datadog/values.yaml": [ + "line_number": 46 + }, { - "hashed_secret": "4a8ce7ae6a8a7f2624e232b61b18c2ac9789c44b", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", + "is_verified": false, + "line_number": 53 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", + "is_verified": false, + "line_number": 56 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-account-access-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 23, - "type": "Secret Keyword" + "line_number": 59 } ], - "kube/services/fenceshib/fenceshib-configmap.yaml": [ + "kube/services/jobs/google-manage-keys-cronjob.yaml": [ { - "hashed_secret": "a985e14b9d6744a2d04f29347693b55c116e478c", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 375, - "type": "Base64 High Entropy String" + "line_number": 48 }, { - "hashed_secret": "adc747bc5eb82ef4b017f5c3759dcee5aa28c36f", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 376, - "type": "Base64 High Entropy String" + "line_number": 51 }, { - "hashed_secret": "59b1702ff0eaf92c9271cbd12f587de97df7e13b", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 377, - "type": "Base64 High Entropy String" + "line_number": 54 }, { - "hashed_secret": "b4a748bbfbbca8925d932a47ab3dcb970d34caf5", + 
"type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 378, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "af646701a84f7dd9f0e87753f54def881326e78a", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 379, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "20c15ad9742124dc06e1612282c49bb443ebcbd9", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 380, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/jobs/google-manage-keys-job.yaml": [ { - "hashed_secret": "9caded71b967a11b7a6cd0f20db91f06f3517d12", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 381, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "8f19501bc9241b71f7b6db929fb35ab12635dcd7", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 382, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "d6220f6a55df1ed11c4250f42ab07bb9da20541a", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 383, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": 
"dadd9b96636f9529f2547d05d754dc310ceba0c3", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 384, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "3074bc66584550e20c3697a28f67a0762394943c", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 385, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "823131319b4c4b4688f44d3e832bfa9696f16b52", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-manage-keys-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 386, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml": [ { - "hashed_secret": "015b780cbfb76988caf52de8ac974a6781e53110", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 387, - "type": "Base64 High Entropy String" + "line_number": 48 }, { - "hashed_secret": "5c8fac33207d74d667680ade09447ea8f43b76d7", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 388, - "type": "Base64 High Entropy String" + "line_number": 51 }, { - "hashed_secret": "c0c4bb09d8394e8f001e337bd27ccac355433d9e", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - 
"line_number": 389, - "type": "Base64 High Entropy String" + "line_number": 54 }, { - "hashed_secret": "f95631bcbbbc56e18487dcb242cfb1b3e74b16a1", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 390, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "01a692ab6232e0882a313d148981bab58ab98f53", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 391, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "658060a680d415ce6690ad2c3b622ddb33ddd50a", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 392, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/jobs/google-verify-bucket-access-group-job.yaml": [ { - "hashed_secret": "80915b0bd9daa5e1f95cad573892980b1b5a2294", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 393, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "cc55977b293d8cdca8a2c19dfea6874e70057c41", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 394, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "e400ed02add75dd5f3a8c212857acf12027437d1", + "type": "Secret Keyword", + "filename": 
"kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 395, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "2e819c8baa3b0508a32b77de258655b3f3a6f7cb", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 396, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "546ed926d58ea5492ab6adb8be94a67aa44ac433", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 397, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "f056f2deceed268e7af6dbdaf2577079c76e006a", + "type": "Secret Keyword", + "filename": "kube/services/jobs/google-verify-bucket-access-group-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 398, - "type": "Base64 High Entropy String" - }, + "line_number": 59 + } + ], + "kube/services/jobs/graph-create-job.yaml": [ { - "hashed_secret": "d75efee28f4798c3a9c6f44b78a8500513ef28b2", + "type": "Secret Keyword", + "filename": "kube/services/jobs/graph-create-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 399, - "type": "Base64 High Entropy String" - }, + "line_number": 33 + } + ], + "kube/services/jobs/indexd-authz-job.yaml": [ { - "hashed_secret": "fbad0bc8f7792b03f89cd3780eb7cf79f284c525", + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", "is_verified": false, - "line_number": 419, - "type": "Base64 High Entropy String" + 
"line_number": 32 }, { - "hashed_secret": "3f6480956a775dacb44e2c39aa3d4722a347f7ab", + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 420, - "type": "Base64 High Entropy String" + "line_number": 35 }, { - "hashed_secret": "17f32ae55b14d708ca121722c2cae37189f19daf", + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-authz-job.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", "is_verified": false, - "line_number": 423, - "type": "Base64 High Entropy String" - }, + "line_number": 38 + } + ], + "kube/services/jobs/indexd-userdb-job.yaml": [ { - "hashed_secret": "08a74689ca077515d406093720a7e5675fb42bb8", + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "0b701c1fabb6ba47a7d47d455e3696d207014bd3", "is_verified": false, - "line_number": 424, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "fa577bb3b2600d2d522dcfea8f1e34896760fcf2", + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 425, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "37254f15cca211a1bd5f7ceb23de2b3eb8fb33aa", + "type": "Secret Keyword", + "filename": "kube/services/jobs/indexd-userdb-job.yaml", + "hashed_secret": "aee98a99696237d70b6854ee4c2d9e42bc696039", "is_verified": false, - "line_number": 426, - "type": "Base64 High Entropy String" - }, + "line_number": 46 + } + ], + "kube/services/jobs/metadata-aggregate-sync-job.yaml": [ { - "hashed_secret": "86865593e038509467b91c2d5f36ccc09c3f422b", + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml", + "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9", "is_verified": 
false, - "line_number": 427, - "type": "Base64 High Entropy String" + "line_number": 31 }, { - "hashed_secret": "a899a8d9e114b2a8e108f90e6a72c056db22489f", + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-aggregate-sync-job.yaml", + "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d", "is_verified": false, - "line_number": 428, - "type": "Base64 High Entropy String" - }, + "line_number": 34 + } + ], + "kube/services/jobs/metadata-delete-expired-objects-job.yaml": [ { - "hashed_secret": "756b4825f886afd83c25563ac9d45f318d695c48", + "type": "Secret Keyword", + "filename": "kube/services/jobs/metadata-delete-expired-objects-job.yaml", + "hashed_secret": "0cc8bac3fabe63722716d1e6fe04a8dded1e3ad0", "is_verified": false, - "line_number": 429, - "type": "Base64 High Entropy String" - }, + "line_number": 24 + } + ], + "kube/services/jobs/remove-objects-from-clouds-job.yaml": [ { - "hashed_secret": "89882eeb0aca97717a7e4afcf4bc08d077813c7f", + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", "is_verified": false, - "line_number": 430, - "type": "Base64 High Entropy String" + "line_number": 34 }, { - "hashed_secret": "347140d7b7ceb4e501c3c9c2ea4f29338e2f145e", + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", "is_verified": false, - "line_number": 431, - "type": "Base64 High Entropy String" + "line_number": 37 }, { - "hashed_secret": "61dbf70eb10d609e60c7b87faf8f755ff48abc46", + "type": "Secret Keyword", + "filename": "kube/services/jobs/remove-objects-from-clouds-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", "is_verified": false, - "line_number": 432, - "type": "Base64 High Entropy String" - }, + "line_number": 43 + } + ], + "kube/services/jobs/replicate-validation-job.yaml": [ { - 
"hashed_secret": "24cd54c4b2f58378bba008cb2df68ac663fba7c8", + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "deb02468778f4041fb189654698ac948e436732d", "is_verified": false, - "line_number": 433, - "type": "Base64 High Entropy String" + "line_number": 34 }, { - "hashed_secret": "fa4f9626ae4b98f4b61203c5bafb6f21c9c31e5d", + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "b6f0ec0b08da77656ced48427841e28d7a8a81d6", "is_verified": false, - "line_number": 434, - "type": "Base64 High Entropy String" + "line_number": 37 }, { - "hashed_secret": "b1370003d9cc1e346c83dba33e0418c7775a0c15", + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "abe72fcb190ed9c73eb20e198c73a97605b95063", "is_verified": false, - "line_number": 435, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "c66526e195e423a7ba7d68ac661cdcd8600dcd1f", + "type": "Secret Keyword", + "filename": "kube/services/jobs/replicate-validation-job.yaml", + "hashed_secret": "ca3cdac59f2bfa45cb014190e4509bf6becf28fb", "is_verified": false, - "line_number": 436, - "type": "Base64 High Entropy String" - }, + "line_number": 43 + } + ], + "kube/services/jobs/s3sync-cronjob.yaml": [ { - "hashed_secret": "d29d7044f0944eb30e02cf445f6998e3343dd811", + "type": "Secret Keyword", + "filename": "kube/services/jobs/s3sync-cronjob.yaml", + "hashed_secret": "27f6dfe15698a3bfaa183c84701cfb2bf4115415", "is_verified": false, - "line_number": 437, - "type": "Base64 High Entropy String" - }, + "line_number": 44 + } + ], + "kube/services/jobs/usersync-job.yaml": [ { - "hashed_secret": "80a869460f33722387d8d58e7d9d2e1bbd5d1fe1", + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 438, - 
"type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "4a06e2a02cbc665adccb4162dc57836895da65b8", + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 439, - "type": "Base64 High Entropy String" + "line_number": 67 }, { - "hashed_secret": "ba2549f35835dfa101d3f660f7604dc78e3e226f", + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 440, - "type": "Base64 High Entropy String" + "line_number": 70 }, { - "hashed_secret": "f354d4ee5fdb94ad29c7b3600264467f45b80eaa", + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 441, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "bf17b587868ba7c3db9865b114261b5b8f1df870", + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 442, - "type": "Base64 High Entropy String" + "line_number": 80 }, { - "hashed_secret": "de1fd7a0d32cba528b4d80818c6601f2588d5383", + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 443, - "type": "Base64 High Entropy String" + "line_number": 83 }, { - "hashed_secret": "bcad65055f6de654541db2bf27d4e27bd54d94c7", + "type": "Secret Keyword", + "filename": "kube/services/jobs/usersync-job.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", "is_verified": false, - "line_number": 444, - "type": "Base64 High Entropy String" - }, + "line_number": 86 + } + ], + 
"kube/services/jobs/useryaml-job.yaml": [ { - "hashed_secret": "f2e16f2dd532f65f79341342fdf57a093fc408d8", + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 445, - "type": "Base64 High Entropy String" + "line_number": 40 }, { - "hashed_secret": "bb036a679a7d2df9fd2ca57068a446bf7f7dd106", + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 446, - "type": "Base64 High Entropy String" + "line_number": 43 }, { - "hashed_secret": "5aa6568b1e8185578a6e964f5c322783ad349554", + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 447, - "type": "Base64 High Entropy String" + "line_number": 46 }, { - "hashed_secret": "4d14835ff0b0bf5aad480296cb705c74ac65f413", + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 448, - "type": "Base64 High Entropy String" + "line_number": 53 }, { - "hashed_secret": "3f23f77dcf454ad73c4d61c44fd9aa584ef946c1", + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 451, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "1739fe5e5dfcf851b64f8b7b11538f1de29ce0b5", + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 452, - "type": "Base64 High Entropy String" + "line_number": 59 }, { - "hashed_secret": 
"8129db302110714fc735e3494bd82a65690e0963", + "type": "Secret Keyword", + "filename": "kube/services/jobs/useryaml-job.yaml", + "hashed_secret": "ea73fcfdaa415890d5fde24d3b2245671be32f73", + "is_verified": false, + "line_number": 65 + } + ], + "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 453, - "type": "Base64 High Entropy String" + "line_number": 56 }, { - "hashed_secret": "b48bfc62091164086a703115a0e68bdb09212591", + "type": "Secret Keyword", + "filename": "kube/services/kayako-wrapper/kayako-wrapper-deploy.yaml", + "hashed_secret": "fb7ea689a364feb7aafbf8d553eb77073fa7ba11", "is_verified": false, - "line_number": 454, - "type": "Base64 High Entropy String" + "line_number": 59 + } + ], + "kube/services/kubecost-standalone/thanos-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/kubecost-standalone/thanos-deploy.yaml", + "hashed_secret": "064376809efc3acda5bd341aca977e149b989696", + "is_verified": false, + "line_number": 127 + } + ], + "kube/services/kubecost-standalone/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/kubecost-standalone/values.yaml", + "hashed_secret": "ec9786daee68e3541963a51299160859fe4db663", + "is_verified": false, + "line_number": 30 + } + ], + "kube/services/manifestservice/manifestservice-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "3da2c49c267b6c58401bbf05e379b38d20434f78", + "is_verified": false, + "line_number": 61 }, { - "hashed_secret": "a10284feaf27f84081073a3267e3dce24ca7b911", + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "469e0c2b1a67aa94955bae023ddc727be31581a7", "is_verified": false, - 
"line_number": 455, - "type": "Base64 High Entropy String" + "line_number": 64 }, { - "hashed_secret": "3fd80f31de4be8dde9d2b421e832c7d4043fd49a", + "type": "Secret Keyword", + "filename": "kube/services/manifestservice/manifestservice-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 456, - "type": "Base64 High Entropy String" + "line_number": 67 } ], - "kube/services/jobs/indexd-authz-job.yaml": [ + "kube/services/metadata/metadata-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/metadata/metadata-deploy.yaml", + "hashed_secret": "e14f65c8ca7f3b27a0f0f5463569954841e162c9", + "is_verified": false, + "line_number": 61 + }, { - "hashed_secret": "bf21a9e8fbc5a3846fb05b4fa0859e0917b2202f", + "type": "Secret Keyword", + "filename": "kube/services/metadata/metadata-deploy.yaml", + "hashed_secret": "c27babf45eb0ed87329e69c7d47dba611e859c5d", "is_verified": false, - "line_number": 87, - "type": "Basic Auth Credentials" + "line_number": 66 } ], "kube/services/monitoring/grafana-values.yaml": [ { + "type": "Secret Keyword", + "filename": "kube/services/monitoring/grafana-values.yaml", "hashed_secret": "2ae868079d293e0a185c671c7bcdac51df36e385", "is_verified": false, - "line_number": 162, - "type": "Secret Keyword" + "line_number": 162 }, { - "hashed_secret": "7a64ff8446b06d38dc271019994f13823a2cbcf4", + "type": "Secret Keyword", + "filename": "kube/services/monitoring/grafana-values.yaml", + "hashed_secret": "5baa61e4c9b93f3f0682250b6cf8331b7ee68fd8", "is_verified": false, - "line_number": 166, - "type": "Secret Keyword" + "line_number": 331 } ], - "kube/services/revproxy/helpers.js": [ + "kube/services/monitoring/thanos-deploy.yaml": [ { - "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", + "type": "Secret Keyword", + "filename": "kube/services/monitoring/thanos-deploy.yaml", + "hashed_secret": "064376809efc3acda5bd341aca977e149b989696", "is_verified": false, - 
"line_number": 10, - "type": "Base64 High Entropy String" + "line_number": 130 } ], - "kube/services/revproxy/helpersTest.js": [ + "kube/services/ohif-viewer/ohif-viewer-deploy.yaml": [ { - "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", + "type": "Secret Keyword", + "filename": "kube/services/ohif-viewer/ohif-viewer-deploy.yaml", + "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", "is_verified": false, - "line_number": 22, - "type": "JSON Web Token" + "line_number": 40 } ], - "kube/services/superset/superset-deploy.yaml": [ + "kube/services/orthanc/orthanc-deploy.yaml": [ { - "hashed_secret": "96e4aceb7cf284be363aa248a32a7cc89785a9f7", + "type": "Secret Keyword", + "filename": "kube/services/orthanc/orthanc-deploy.yaml", + "hashed_secret": "3f87db80519a9ae7d8112f4e0d4cc81441181818", "is_verified": false, - "line_number": 38, - "type": "Secret Keyword" + "line_number": 41 } ], - "kube/services/superset/superset-redis.yaml": [ + "kube/services/peregrine/peregrine-canary-deploy.yaml": [ { - "hashed_secret": "4af3596275edcb7cd5cc6c3c38bc10479902a08f", + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", "is_verified": false, - "line_number": 165, - "type": "Secret Keyword" + "line_number": 61 }, { - "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", + "is_verified": false, + "line_number": 64 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", + "is_verified": false, + "line_number": 70 + }, + { + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", 
"is_verified": false, - "line_number": 265, - "type": "Secret Keyword" + "line_number": 73 } ], - "kube/services/superset/values.yaml": [ + "kube/services/peregrine/peregrine-deploy.yaml": [ { - "hashed_secret": "6f803b24314c39062efe38d0c1da8c472f47eab3", + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "6131c35d7eebdbc17a314bef8aac75b87323cff3", "is_verified": false, - "line_number": 54, - "type": "Secret Keyword" + "line_number": 67 }, { - "hashed_secret": "6eae3a5b062c6d0d79f070c26e6d62486b40cb46", + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 86, - "type": "Secret Keyword" + "line_number": 70 }, { - "hashed_secret": "3eb416223e9e69e6bb8ee19793911ad1ad2027d8", + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "990a3202b5c94aa5e5997e7dc1a218e457f8b8ec", "is_verified": false, - "line_number": 212, - "type": "Secret Keyword" + "line_number": 76 }, { - "hashed_secret": "ff55435345834a3fe224936776c2aa15f6ed5358", + "type": "Secret Keyword", + "filename": "kube/services/peregrine/peregrine-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 79 + } + ], + "kube/services/pidgin/pidgin-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/pidgin/pidgin-deploy.yaml", + "hashed_secret": "49af232c7adfcd54a40202e06261396a757e4ddd", "is_verified": false, - "line_number": 396, - "type": "Secret Keyword" + "line_number": 59 }, { - "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", + "type": "Secret Keyword", + "filename": "kube/services/pidgin/pidgin-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 503, - "type": "Secret Keyword" + "line_number": 62 } 
], - "package-lock.json": [ + "kube/services/portal/portal-deploy.yaml": [ { - "hashed_secret": "0656ad0df3af4633dc369f13d5e8806973c5fd9d", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1481, - "type": "Base64 High Entropy String" + "line_number": 55 }, { - "hashed_secret": "00091d875d922437c5fc9e6067a08e78c2482e87", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 1489, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "c4e5cc37e115bf7d86e76e3d799705bf691e4d00", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 1521, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "0512e37fbedf1d16828680a038a241b4780a5c04", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 1547, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/portal/portal-root-deploy.yaml": [ { - "hashed_secret": "01868fd50edbfe6eb91e5b01209b543adc6857af", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1611, - "type": "Base64 High Entropy String" + "line_number": 55 }, { - "hashed_secret": "a6f48bf1e398deffc7fd31da17c3506b46c97a93", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "5c5a8e158ad2d8544f73cd5422072d414f497faa", "is_verified": false, - "line_number": 
1640, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "85ce358dbdec0996cf3ccd2bf1c6602af68c181e", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "619551216e129bbc5322678abf9c9210c0327cfb", "is_verified": false, - "line_number": 1648, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "6f9bfb49cb818d2fe07592515e4c3f7a0bbd7e0e", + "type": "Secret Keyword", + "filename": "kube/services/portal/portal-root-deploy.yaml", + "hashed_secret": "e3c7565314f404e3883929f003c65a02a80366e9", "is_verified": false, - "line_number": 1664, - "type": "Base64 High Entropy String" - }, + "line_number": 67 + } + ], + "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml": [ { - "hashed_secret": "7098a3e6d6d2ec0a40f04fe12509c5c6f4c49c0e", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "dbd5f43594a152b52261c8e21520a3989823fe55", "is_verified": false, - "line_number": 1683, - "type": "Base64 High Entropy String" + "line_number": 74 }, { - "hashed_secret": "1664ad175bba1795a7ecad572bae7e0740b94f56", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "1c062eaac9e6fa0766377d3cfc3e4a88982fecdb", "is_verified": false, - "line_number": 1733, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "1ec4ce2eb945ce2f816dcb6ebdd1e10247f439a3", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "694cfd0a009a42055e975de9111b2f3c6e8a3634", "is_verified": false, - "line_number": 1742, - "type": "Base64 High Entropy String" + "line_number": 80 }, { - "hashed_secret": "a7af5768a6d936e36f28e1030d7f894d7aaf555e", + "type": "Secret Keyword", + "filename": 
"kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "4b09a441cef18c75560f6c3caeafc96f2163c3fd", "is_verified": false, - "line_number": 1755, - "type": "Base64 High Entropy String" + "line_number": 90 }, { - "hashed_secret": "6fbc7dd864586173160874f2a86ca7d2d552cb85", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "7e7478a28dcc3695a083b66b47243b050c813e2d", "is_verified": false, - "line_number": 1769, - "type": "Base64 High Entropy String" + "line_number": 93 }, { - "hashed_secret": "81a961f2c89c6209328b74a8768e30fd76c3ac72", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "2f57bb00fcb93481c2be444e3e9f322b6cb5fadb", "is_verified": false, - "line_number": 1855, - "type": "Base64 High Entropy String" + "line_number": 96 }, { - "hashed_secret": "797d4751c536c421cb82b9f62e0a804af30d78f5", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "98f5a68541a6d981bf5825f23dffe6a0b150e457", "is_verified": false, - "line_number": 1889, - "type": "Base64 High Entropy String" + "line_number": 99 }, { - "hashed_secret": "0d55babfa89f240142c0adfc7b560500a1d3ae7c", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "0849046cdafcdb17f5a4bf5c528430d5e04ad295", "is_verified": false, - "line_number": 1894, - "type": "Base64 High Entropy String" + "line_number": 102 }, { - "hashed_secret": "e9fdc3025cd10bd8aa4508611e6b7b7a9d650a2c", + "type": "Secret Keyword", + "filename": "kube/services/presigned-url-fence/presigned-url-fence-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 1921, - "type": "Base64 High Entropy String" - }, + "line_number": 105 + } + ], + 
"kube/services/qa-dashboard/qa-dashboard-deployment.yaml": [ { - "hashed_secret": "4cf9419259c0ce8eee84b468af3c72db8b001620", + "type": "Secret Keyword", + "filename": "kube/services/qa-dashboard/qa-dashboard-deployment.yaml", + "hashed_secret": "253939a955a575ac69f409e5914dd0191b704760", "is_verified": false, - "line_number": 1950, - "type": "Base64 High Entropy String" - }, + "line_number": 63 + } + ], + "kube/services/qabot/qabot-deploy.yaml": [ { - "hashed_secret": "24816e3eb4308e247bde7c1d09ffb7b79c519b71", + "type": "Secret Keyword", + "filename": "kube/services/qabot/qabot-deploy.yaml", + "hashed_secret": "a9fa7aa8c08b647c3fb696e6598642d4a63e25be", "is_verified": false, - "line_number": 1983, - "type": "Base64 High Entropy String" - }, + "line_number": 86 + } + ], + "kube/services/requestor/requestor-deploy.yaml": [ { - "hashed_secret": "e9adfe8a333d45f4776fe0eab31608be5d7b6a7d", + "type": "Secret Keyword", + "filename": "kube/services/requestor/requestor-deploy.yaml", + "hashed_secret": "15debe4170aa5b89858d939f4c0644307ae7789b", "is_verified": false, - "line_number": 2004, - "type": "Base64 High Entropy String" - }, + "line_number": 61 + } + ], + "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf": [ { - "hashed_secret": "03d6fb388dd1b185129b14221f7127715822ece6", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/gen3.nginx.conf/indexd-service.conf", + "hashed_secret": "f89523833036f85fed37ce3ebf25492189bc9397", "is_verified": false, - "line_number": 2013, - "type": "Base64 High Entropy String" - }, + "line_number": 41 + } + ], + "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf": [ { - "hashed_secret": "ee161bb3f899720f95cee50a5f9ef9c9ed96278b", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/gen3.nginx.conf/metadata-service.conf", + "hashed_secret": "18c0871af26eb9875c0f840b13211f097c133fd2", "is_verified": false, - "line_number": 2046, - "type": "Base64 High Entropy String" - }, + "line_number": 24 
+ } + ], + "kube/services/revproxy/helpers.js": [ { - "hashed_secret": "ebeb5b574fa1ed24a40248275e6136759e766466", + "type": "Base64 High Entropy String", + "filename": "kube/services/revproxy/helpers.js", + "hashed_secret": "1d278d3c888d1a2fa7eed622bfc02927ce4049af", "is_verified": false, - "line_number": 2078, - "type": "Base64 High Entropy String" - }, + "line_number": 10 + } + ], + "kube/services/revproxy/helpersTest.js": [ { - "hashed_secret": "a6a555a428522ccf439fd516ce7c7e269274363f", + "type": "Base64 High Entropy String", + "filename": "kube/services/revproxy/helpersTest.js", + "hashed_secret": "389c3ec21b7325359051e97ff569b078843d2d37", "is_verified": false, - "line_number": 2083, - "type": "Base64 High Entropy String" + "line_number": 19 }, { - "hashed_secret": "f7f85d9f7c87f1e576dcaf4cf50f35728f9a3265", + "type": "JSON Web Token", + "filename": "kube/services/revproxy/helpersTest.js", + "hashed_secret": "e029d4904cc728879d70030572bf37d4510367cb", "is_verified": false, - "line_number": 2111, - "type": "Base64 High Entropy String" - }, + "line_number": 22 + } + ], + "kube/services/revproxy/revproxy-deploy.yaml": [ { - "hashed_secret": "3f1646b60abe74297d2f37a1eee5dc771ad834fc", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "c7a87a61893a647e29289845cb51e61afb06800b", "is_verified": false, - "line_number": 2138, - "type": "Base64 High Entropy String" + "line_number": 74 }, { - "hashed_secret": "fd933c71e82d5519ae0cb0779b370d02f6935759", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": "b3a4e2dea4c1fae8c58a07a84065b73b3a2d831c", "is_verified": false, - "line_number": 2143, - "type": "Base64 High Entropy String" + "line_number": 77 }, { - "hashed_secret": "7090aa59cb52ad1f1810b08c4ac1ddf5c8fce523", + "type": "Secret Keyword", + "filename": "kube/services/revproxy/revproxy-deploy.yaml", + "hashed_secret": 
"9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", "is_verified": false, - "line_number": 2150, - "type": "Base64 High Entropy String" - }, + "line_number": 80 + } + ], + "kube/services/sftp/sftp-deploy.yaml": [ { - "hashed_secret": "756444bea4ea3d67844d8ddf58ad32356e9c2430", + "type": "Secret Keyword", + "filename": "kube/services/sftp/sftp-deploy.yaml", + "hashed_secret": "9fdebf62e477d59d25730744c8b3089c67c3db85", "is_verified": false, - "line_number": 2188, - "type": "Base64 High Entropy String" - }, + "line_number": 39 + } + ], + "kube/services/sheepdog/sheepdog-canary-deploy.yaml": [ { - "hashed_secret": "f74135fdd6b8dafdfb01ebbc61c5e5c24ee27cf8", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 2291, - "type": "Base64 High Entropy String" + "line_number": 58 }, { - "hashed_secret": "56fbae787f4aed7d0632e95840d71bd378d3a36f", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 2303, - "type": "Base64 High Entropy String" + "line_number": 61 }, { - "hashed_secret": "81cb6be182eb79444202c4563080aee75296a672", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 2308, - "type": "Base64 High Entropy String" + "line_number": 67 }, { - "hashed_secret": "f0f3f7bce32184893046ac5f8cc80da56c3ca539", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-canary-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 70 + } + ], + "kube/services/sheepdog/sheepdog-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": 
"kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "ec9c944c51e87322de8d22e3ca9e2be1ad8fee0d", "is_verified": false, - "line_number": 2317, - "type": "Base64 High Entropy String" + "line_number": 63 }, { - "hashed_secret": "097893233346336f4003acfb6eb173ee59e648f0", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "79496491225eda4a7be9fcddee2825c85b1535cc", "is_verified": false, - "line_number": 2327, - "type": "Base64 High Entropy String" + "line_number": 66 }, { - "hashed_secret": "bb14c3b4ef4a9f2e86ffdd44b88d9b6729419671", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "e43756046ad1763d6946575fed0e05130a154bd2", "is_verified": false, - "line_number": 2332, - "type": "Base64 High Entropy String" + "line_number": 72 }, { - "hashed_secret": "71344a35cff67ef081920095d1406601fb5e9b97", + "type": "Secret Keyword", + "filename": "kube/services/sheepdog/sheepdog-deploy.yaml", + "hashed_secret": "9ce05cf6168d15dfe02aac9ca9e0712c19c9436d", + "is_verified": false, + "line_number": 75 + } + ], + "kube/services/shiny/shiny-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/shiny/shiny-deploy.yaml", + "hashed_secret": "327a1bbc6dc0ce857472ee9162a3415133862d50", + "is_verified": false, + "line_number": 43 + } + ], + "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/ssjdispatcher/ssjdispatcher-deploy.yaml", + "hashed_secret": "7f932449df74fc78573fea502df8a484aef3f69d", + "is_verified": false, + "line_number": 61 + } + ], + "kube/services/superset/superset-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-deploy.yaml", + "hashed_secret": "3e9d1737117ff62b23e37aedc72b522b0134997a", "is_verified": false, - "line_number": 2340, - "type": "Base64 High Entropy String" + "line_number": 235 }, { - "hashed_secret": 
"eb3db6990fd43477a35dfeffc90b3f1ffa83c7bd", + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-deploy.yaml", + "hashed_secret": "6ac08eaa58d425783ff8b5a38fe16ee66c0bce15", + "is_verified": false, + "line_number": 311 + } + ], + "kube/services/superset/superset-redis.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/superset-redis.yaml", + "hashed_secret": "9fe1c31809da38c55b2b64bfab47b92bc5f6b7b9", + "is_verified": false, + "line_number": 265 + } + ], + "kube/services/superset/values.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "9a09d4081ddc128a80384712ce6df3578e6bc58e", "is_verified": false, - "line_number": 2349, - "type": "Base64 High Entropy String" + "line_number": 173 }, { - "hashed_secret": "266288bdc14807b538d1e48a5891e361fa9b4a14", + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "118c413f3fc929a1624f4c3e1da1e3d24377a693", "is_verified": false, - "line_number": 2357, - "type": "Base64 High Entropy String" + "line_number": 299 }, { - "hashed_secret": "800477261175fd21f23e7321923e1fba6ae55471", + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "d2a8d1ddfa75398366cff06545380c73481ec17d", "is_verified": false, - "line_number": 2369, - "type": "Base64 High Entropy String" + "line_number": 445 }, { - "hashed_secret": "3f0c251b9c2c21454445a98fde6915ceacde2136", + "type": "Secret Keyword", + "filename": "kube/services/superset/values.yaml", + "hashed_secret": "98a84a63e5633d17e3b27b69695f87aa7189e9dc", + "is_verified": false, + "line_number": 459 + } + ], + "kube/services/thor/thor-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/thor/thor-deploy.yaml", + "hashed_secret": "1f3f96a3887209d0dda357e5516231ee9c5cd9a7", + "is_verified": false, + "line_number": 100 + } + ], + "kube/services/tube/tube-deploy.yaml": [ + { + 
"type": "Secret Keyword", + "filename": "kube/services/tube/tube-deploy.yaml", + "hashed_secret": "ca253d1c9dece2da0d6fb24ded7bdb849a475966", "is_verified": false, - "line_number": 2387, - "type": "Base64 High Entropy String" + "line_number": 58 } ], - "tf_files/aws/cognito/README.md": [ + "kube/services/ws-storage/ws-storage-deploy.yaml": [ { - "hashed_secret": "f6920f370a30262b7dd70e97293c73ec89739b70", + "type": "Secret Keyword", + "filename": "kube/services/ws-storage/ws-storage-deploy.yaml", + "hashed_secret": "ec2d9395e11f353370a4abac21a1565641b35ce9", "is_verified": false, - "line_number": 106, - "type": "Secret Keyword" + "line_number": 66 + } + ], + "kube/services/wts/wts-deploy.yaml": [ + { + "type": "Secret Keyword", + "filename": "kube/services/wts/wts-deploy.yaml", + "hashed_secret": "5de687ae886f19c3cb68d4980e3f2e77cca3db9e", + "is_verified": false, + "line_number": 65 + } + ], + "packer/buildAll.sh": [ + { + "type": "Secret Keyword", + "filename": "packer/buildAll.sh", + "hashed_secret": "6e1d66a1596528c308e601c10aa0b92d53606ab9", + "is_verified": false, + "line_number": 15 + } + ], + "packer/variables.example.json": [ + { + "type": "Secret Keyword", + "filename": "packer/variables.example.json", + "hashed_secret": "a3a0648a036bebf78ba1a1eb498a66081059da10", + "is_verified": false, + "line_number": 5 } ], "tf_files/aws/commons/README.md": [ { - "hashed_secret": "d02e53411e8cb4cd709778f173f7bc9a3455f8ed", + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "5f02a3fb14ab1ce5c18c362b04b8ffc603ea5951", "is_verified": false, - "line_number": 60, - "type": "Secret Keyword" + "line_number": 60 }, { - "hashed_secret": "9dc0da3613af850c5a018b0a88a5626fb8888e4e", + "type": "Secret Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "49cfceed8aa8df159e53aa5c5951cad48a3f1216", "is_verified": false, - "line_number": 78, - "type": "Secret Keyword" + "line_number": 67 + }, + { + "type": "Secret 
Keyword", + "filename": "tf_files/aws/commons/README.md", + "hashed_secret": "18ad13589ca5fb3c432d7d9f0fe49f8ed6e2c478", + "is_verified": false, + "line_number": 70 } ], "tf_files/aws/eks/sample.tfvars": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/eks/sample.tfvars", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", "is_verified": false, - "line_number": 107, - "type": "Hex High Entropy String" + "line_number": 107 } ], "tf_files/aws/eks/variables.tf": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/eks/variables.tf", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", "is_verified": false, - "line_number": 133, - "type": "Hex High Entropy String" + "line_number": 133 } ], "tf_files/aws/modules/common-logging/README.md": [ { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/README.md", "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", "is_verified": false, - "line_number": 57, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_verified": false, - "line_number": 59, - "type": "Hex High Entropy String" + "line_number": 57 } ], "tf_files/aws/modules/common-logging/lambda_function.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", + "hashed_secret": 
"a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 18 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/lambda_function.py", "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", "is_verified": false, - "line_number": 30, - "type": "Hex High Entropy String" + "line_number": 30 } ], "tf_files/aws/modules/common-logging/testLambda.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", "is_verified": false, - "line_number": 5, - "type": "Base64 High Entropy String" + "line_number": 5 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 5 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 + }, + { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/common-logging/testLambda.py", + "hashed_secret": 
"4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", + "is_verified": false, + "line_number": 10 } ], "tf_files/aws/modules/eks/variables.tf": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/eks/variables.tf", "hashed_secret": "83c1003f406f34fba4d6279a948fee3abc802884", "is_verified": false, - "line_number": 113, - "type": "Hex High Entropy String" + "line_number": 113 } ], "tf_files/aws/modules/management-logs/README.md": [ { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/README.md", "hashed_secret": "83442aa5a16cb1992731c32367ef464564388017", "is_verified": false, - "line_number": 54, - "type": "Base64 High Entropy String" - }, - { - "hashed_secret": "fd4a4637ac99de2c1d89155d66d1f3de15d231a2", - "is_verified": false, - "line_number": 56, - "type": "Hex High Entropy String" + "line_number": 54 } ], "tf_files/aws/modules/management-logs/lambda_function.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", "is_verified": false, - "line_number": 18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", "is_verified": false, - "line_number": 18, - "type": "Base64 High Entropy String" + "line_number": 18 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", + "is_verified": false, + "line_number": 18 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 
18, - "type": "Hex High Entropy String" + "line_number": 18 }, { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/lambda_function.py", "hashed_secret": "4f9fd96d3926f2c53ab0261d33f1d1a85a6a77ff", "is_verified": false, - "line_number": 30, - "type": "Hex High Entropy String" + "line_number": 30 } ], "tf_files/aws/modules/management-logs/testLambda.py": [ { + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "061765d6854d72f03a6527610d5b6822c9d516de", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "61df81a188bb4dba6ae6128ff7e2c9c6a6f736ef", "is_verified": false, - "line_number": 5, - "type": "Base64 High Entropy String" + "line_number": 5 }, { - "hashed_secret": "a4667450661f32f7ad0f06e2f893a8fee9f18e38", + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "a4752db26b4774d3429878f36ceb7b61805ffd94", "is_verified": false, - "line_number": 5, - "type": "Hex High Entropy String" + "line_number": 5 }, { - "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", + "type": "Hex High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "b979d8d0c0e8413c20a5597f789e31f0a2b2ff3a", "is_verified": false, - "line_number": 6, - "type": "Base64 High Entropy String" + "line_number": 5 }, { - "hashed_secret": "51118900cd675df1b44f254057398f3e52902a5d", + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "3cf8eb4e9254e1d6cc523da01f8b798b9a83101a", "is_verified": false, - "line_number": 6, - "type": "Hex High Entropy String" + "line_number": 6 }, { + "type": "Hex High Entropy String", + "filename": 
"tf_files/aws/modules/management-logs/testLambda.py", "hashed_secret": "60a6dfc8d43cd2f5c6292899fc2f94f2d4fc32c4", "is_verified": false, - "line_number": 6, - "type": "Hex High Entropy String" + "line_number": 6 + }, + { + "type": "Base64 High Entropy String", + "filename": "tf_files/aws/modules/management-logs/testLambda.py", + "hashed_secret": "d484ccb4ced21e0149078377f14b913bf5c613d0", + "is_verified": false, + "line_number": 6 } ], "tf_files/aws/slurm/README.md": [ { - "hashed_secret": "fd85d792fa56981cf6a8d2a5c0857c74af86e99d", + "type": "Secret Keyword", + "filename": "tf_files/aws/slurm/README.md", + "hashed_secret": "c16686250cd583de64e02a47a8b194cd5578b2a1", "is_verified": false, - "line_number": 83, - "type": "Secret Keyword" + "line_number": 83 } ], "tf_files/azure/cloud.tf": [ { - "hashed_secret": "7c1a4b52b64e4106041971c345a1f3eab58fb2a4", + "type": "Secret Keyword", + "filename": "tf_files/azure/cloud.tf", + "hashed_secret": "38d930120a56321ceaa147b2bc1f19db53a0b993", "is_verified": false, - "line_number": 424, - "type": "Secret Keyword" + "line_number": 361 } ], "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars": [ { + "type": "Secret Keyword", + "filename": "tf_files/gcp-bwg/roots/commons_setup/variables/answerfile-commons_setup-001.template.tfvars", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", "is_verified": false, - "line_number": 231, - "type": "Secret Keyword" + "line_number": 231 } ], "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars": [ { + "type": "Secret Keyword", + "filename": "tf_files/gcp-bwg/roots/templates/answerfile-commons_setup-001.template.tfvars", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", "is_verified": false, - "line_number": 231, - "type": "Secret Keyword" + "line_number": 231 } ], "tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP": [ { + "type": "Secret Keyword", + "filename": 
"tf_files/gcp-bwg/roots/templates/answerfile-env-tenant.user.tfvars_NO_APP_SETUP", "hashed_secret": "f865b53623b121fd34ee5426c792e5c33af8c227", "is_verified": false, - "line_number": 262, - "type": "Secret Keyword" + "line_number": 262 } ], - "tf_files/gcp/commons/sample.tfvars": [ + "tf_files/gcp/commons/root.tf": [ { - "hashed_secret": "9f29ed52bc91ba45b309d5234e95edc7ca5286fd", + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/root.tf", + "hashed_secret": "013b6be0bd7ef38a9ee3472cec65c208a19421e6", "is_verified": false, - "line_number": 11, - "type": "Secret Keyword" - }, + "line_number": 65 + } + ], + "tf_files/gcp/commons/sample.tfvars": [ { - "hashed_secret": "8db3b325254b6389ca194d829d2fc923dc0a945d", + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/sample.tfvars", + "hashed_secret": "6b44a330b450ee550c081410c6b705dfeaa105ce", "is_verified": false, - "line_number": 26, - "type": "Secret Keyword" + "line_number": 26 }, { - "hashed_secret": "253c7b5e7c83a86346fc4501495b130813f08105", - "is_verified": false, - "line_number": 37, - "type": "Secret Keyword" - } - ], - "tf_files/shared/modules/k8s_configs/creds.tpl": [ - { - "hashed_secret": "1f5e25be9b575e9f5d39c82dfd1d9f4d73f1975c", + "type": "Secret Keyword", + "filename": "tf_files/gcp/commons/sample.tfvars", + "hashed_secret": "791191ef9eafc75f5dd28e37df837b4991556876", "is_verified": false, - "line_number": 8, - "type": "Secret Keyword" + "line_number": 31 } ] }, - "version": "0.13.1", - "word_list": { - "file": null, - "hash": null - } + "generated_at": "2024-03-07T21:26:14Z" } diff --git a/files/scripts/ecr-access-job-requirements.txt b/files/scripts/ecr-access-job-requirements.txt new file mode 100644 index 000000000..bb6d4b847 --- /dev/null +++ b/files/scripts/ecr-access-job-requirements.txt @@ -0,0 +1 @@ +boto3<2 diff --git a/files/scripts/ecr-access-job.md b/files/scripts/ecr-access-job.md new file mode 100644 index 000000000..9659b186b --- /dev/null +++ 
b/files/scripts/ecr-access-job.md @@ -0,0 +1,85 @@ +# ecr-access-job + +### How to run + +Configure `global.ecr-access-job-role-arn` to the ARN of the `EcrRepoPolicyUpdateRole` role (described below) in the `manifest.json` file. + +Run `gen3 kube-setup-ecr-access-cronjob` to set up the ECR access cronjob. + +### What does it do? + +The job runs the `ecr-access-job.py` script. + +This script updates the configuration of ECR repositories so that users can access the repositories that were created for them. + +It queries a DynamoDB table which has the following (simplified) structure: +| user_id | workspace_type | account_id | +| ------------------ | -------------------- | ---------- | +| user1@username.com | Direct Pay | 123456 | +| user2@username.com | Direct Pay | 789012 | +| user1@username.com | Other workspace type | | + +and then allows each AWS account to acccess the appropriate ECR repositories. The users' ECR repositories are based on their username as stored in the table. For example, `user1@username.com`'s ECR repository is assumed to be `nextflow-approved/user1-40username-2ecom`. + +### Access needed + +- "EcrRepoPolicyUpdateRole" role in the account (Acct1) that contains the ECR repositories: + +**Note:** `kube-setup-ecr-access-cronjob.sh` assumes this role already exists. 
+ +Permissions: +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "UpdateEcrRepoPolicy", + "Effect": "Allow", + "Action": "ecr:SetRepositoryPolicy", + "Resource": "arn:aws:ecr:us-east-1::repository/nextflow-approved/*" + } + ] +} +``` + +Trust policy (allows Acct2): +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowAssumingRole", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam:::root" + }, + "Action": "sts:AssumeRole" + } + ] +} +``` + +- Policy in the account (Acct2) that contains the DynamoDB table (created automatically by `kube-setup-ecr-access-job.sh`): +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ReadDynamoDB", + "Effect": "Allow", + "Action": [ + "dynamodb:Scan" + ], + "Resource": "arn:aws:dynamodb:::table/" + }, + { + "Sid": "AssumeEcrRole", + "Effect": "Allow", + "Action": [ + "sts:AssumeRole" + ], + "Resource": "arn:aws:iam:::role/" + } + ] +} +``` diff --git a/files/scripts/ecr-access-job.py b/files/scripts/ecr-access-job.py new file mode 100644 index 000000000..828d94c96 --- /dev/null +++ b/files/scripts/ecr-access-job.py @@ -0,0 +1,177 @@ +""" +See documentation at https://github.com/uc-cdis/cloud-automation/blob/master/files/scripts/ecr-access-job.md +""" + +from decimal import Decimal +import json +import os +from typing import List +import uuid + +import boto3 +from boto3.dynamodb.conditions import Attr + + +REGION = "us-east-1" + +# for local testing. in production, use a service account instead of a key. +MAIN_ACCOUNT_CREDS = {"key_id": os.environ.get("KEY_ID"), "key_secret": os.environ.get("KEY_SECRET")} + + +def escapism(string: str) -> str: + """ + This is a direct translation of Hatchery's `escapism` golang function to python. + We need to escape the username in the same way it's escaped by Hatchery's `escapism` function because + special chars cannot be used in an ECR repo name, and so that the ECR repo generated here matches the + name expected by Hatchery. 
+ """ + safeBytes = "abcdefghijklmnopqrstuvwxyz0123456789" + escaped = "" + for v in string: + if v not in safeBytes: + hexCode = "{0:02x}".format(ord(v)) + escaped += "-" + hexCode + else: + escaped += v + return escaped + + +def get_configs() -> (str, str): + table_name = os.environ.get("PAY_MODELS_DYNAMODB_TABLE") + if not table_name: + raise Exception("Missing 'PAY_MODELS_DYNAMODB_TABLE' environment variable") + + ecr_role_arn = os.environ.get("ECR_ACCESS_JOB_ARN") + if not ecr_role_arn: + raise Exception("Missing 'ECR_ACCESS_JOB_ARN' environment variable") + + return table_name, ecr_role_arn + + +def query_usernames_and_account_ids(table_name: str) -> List[dict]: + """ + Returns: + List[dict]: [ { "user_id": "user1@username.com", "account_id": "123456" } ] + """ + if MAIN_ACCOUNT_CREDS["key_id"]: + session = boto3.Session( + aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"], + aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"], + ) + else: + session = boto3.Session() + dynamodb = session.resource("dynamodb", region_name=REGION) + table = dynamodb.Table(table_name) + + # get usernames and AWS account IDs from DynamoDB + queried_keys = ["user_id", "account_id"] + filter_expr = Attr("workspace_type").eq("Direct Pay") + proj = ", ".join("#" + key for key in queried_keys) + expr = {"#" + key: key for key in queried_keys} + response = table.scan( + FilterExpression=filter_expr, + ProjectionExpression=proj, + ExpressionAttributeNames=expr, + ) + assert response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200, response + items = response["Items"] + # if the response is paginated, get the rest of the items + while response["Count"] > 0: + if "LastEvaluatedKey" not in response: + break + response = table.scan( + FilterExpression=filter_expr, + ProjectionExpression=proj, + ExpressionAttributeNames=expr, + ExclusiveStartKey=response["LastEvaluatedKey"], + ) + assert ( + response.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200 + ), response + 
items.extend(response["Items"]) + + return items + + +def update_access_in_ecr(repo_to_account_ids: List[dict], ecr_role_arn: str) -> None: + # get access to ECR in the account that contains the ECR repos + if MAIN_ACCOUNT_CREDS["key_id"]: + sts = boto3.client( + "sts", + aws_access_key_id=MAIN_ACCOUNT_CREDS["key_id"], + aws_secret_access_key=MAIN_ACCOUNT_CREDS["key_secret"], + ) + else: + sts = boto3.client("sts") + assumed_role = sts.assume_role( + RoleArn=ecr_role_arn, + DurationSeconds=900, # minimum time for aws assume role as per boto docs + RoleSessionName=f"ecr-access-assume-role-{str(uuid.uuid4())[:8]}", + ) + assert "Credentials" in assumed_role, "Unable to assume role" + ecr = boto3.client( + "ecr", + aws_access_key_id=assumed_role["Credentials"]["AccessKeyId"], + aws_secret_access_key=assumed_role["Credentials"]["SecretAccessKey"], + aws_session_token=assumed_role["Credentials"]["SessionToken"], + ) + + # for each ECR repo, whitelist the account IDs so users can access the repo + for repo, account_ids in repo_to_account_ids.items(): + print(f"Allowing AWS accounts {account_ids} to use ECR repository '{repo}'") + policy = { + "Version": "2008-10-17", + "Statement": [ + { + "Sid": "AllowCrossAccountPull", + "Effect": "Allow", + "Principal": { + "AWS": [ + f"arn:aws:iam::{account_id}:root" + for account_id in account_ids + ] + }, + "Action": [ + "ecr:BatchCheckLayerAvailability", + "ecr:BatchGetImage", + "ecr:GetAuthorizationToken", + "ecr:GetDownloadUrlForLayer", + ], + } + ], + } + # Note that this is overwriting the repo policy, not appending to it. This means we can't have 2 dynamodb + # tables pointing at the same set of ECR repos: the repos would only allow the accounts in the table for + # which the script was run most recently. eg QA and Staging can't use the same ECR repos. + # Appending is not possible since this code will eventually rely on Arborist for authorization information + # and we'll need to overwrite in order to remove expired access. 
+ try: + ecr.set_repository_policy( + repositoryName=repo, + policyText=json.dumps(policy), + ) + except Exception as e: + print(f" Unable to update '{repo}'; skipping it: {e}") + + +def main() -> None: + table_name, ecr_role_arn = get_configs() + items = query_usernames_and_account_ids(table_name) + + # construct mapping: { ECR repo url: [ AWS account IDs with access ] } + ecr_repo_prefix = "nextflow-approved" + repo_to_account_ids = { + f"{ecr_repo_prefix}/{escapism(e['user_id'])}": [e["account_id"]] + for e in items + if "account_id" in e + } + print( + "Mapping of ECR repository to allowed AWS accounts:\n", + json.dumps(repo_to_account_ids, indent=2), + ) + + update_access_in_ecr(repo_to_account_ids, ecr_role_arn) + + +if __name__ == "__main__": + main() diff --git a/gen3/bin/kube-setup-ecr-access-cronjob.sh b/gen3/bin/kube-setup-ecr-access-cronjob.sh new file mode 100644 index 000000000..d23afc862 --- /dev/null +++ b/gen3/bin/kube-setup-ecr-access-cronjob.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +source "${GEN3_HOME}/gen3/lib/utils.sh" +gen3_load "gen3/lib/kube-setup-init" + +setup_ecr_access_job() { + if g3kubectl get configmap manifest-global > /dev/null; then + ecrRoleArn=$(g3kubectl get configmap manifest-global -o jsonpath={.data.ecr-access-job-role-arn}) + fi + if [ -z "$ecrRoleArn" ]; then + gen3_log_err "Missing 'global.ecr-access-job-role-arn' configuration in manifest.json" + return 1 + fi + + local saName="ecr-access-job-sa" + if ! 
g3kubectl get sa "$saName" > /dev/null 2>&1; then + tempFile="ecr-access-job-policy.json" + cat - > $tempFile < Date: Fri, 8 Mar 2024 16:28:46 -0600 Subject: [PATCH 308/362] Add new middleware url (#2497) * add new middleware url * feat(argo-wrapper): newline --------- Co-authored-by: Andrew Prokhorenkov --- kube/services/argo-wrapper/config.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/kube/services/argo-wrapper/config.ini b/kube/services/argo-wrapper/config.ini index 40ac392fd..0693ee2e2 100644 --- a/kube/services/argo-wrapper/config.ini +++ b/kube/services/argo-wrapper/config.ini @@ -3,3 +3,4 @@ ARGO_ACCESS_METHOD = access ARGO_HOST = $ARGO_HOST ARGO_NAMESPACE = $ARGO_NAMESPACE COHORT_DEFINITION_BY_SOURCE_AND_TEAM_PROJECT_URL = http://cohort-middleware-service/cohortdefinition-stats/by-source-id/{}/by-team-project?team-project={} +COHORT_MIDDLEWARE_URL = http://cohort-middleware-service From f84581c1fd8899d7cbb09d48464bb6a9a6ceaa47 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 11 Mar 2024 09:35:52 -0600 Subject: [PATCH 309/362] Update gen3-helpers to fix failing tests (#2443) * updating the awsrole script to better handle variables and flag * adding "migrate to vpc cni" script to cloud-auto * removing script- wrong branch * updating kube-setup-argo script for IRSA * changing the script to get the 3 private ip to account for fargate instances and fix the "ec2 test filter" test * changing -S to -e to verify if docker is the container runtime due to the way that Jenkins pods are setup * testing mount change * testing another mount change * reverting change * mounting containerd instead * modifying jenkins shell commands * reverting ecr change * reverting jenkisfile change and then changing permissions on containerd.sock instead of docker.sock * adding more jobs to the "excludeJob" list * commenting out jupyter metric test as we currently don't use prometheus in most environments * commented out 
the wrong line in the wrong script * removing terraform test as we are now using Atlantis and Terragrunt to manage our infastructure and will no longer use the "workon" command * fixing snapshot script to grab init containers and all the main containers seperately * updating to use "pip3" instead of /usr/bin/pip3" * changing the evicted pod cleanup to produce no output so the healthcheck will pass in Jenkins. also, adding in the proper path for pip in the pytest stage of the pipeline * trying another method to get rid of the json error when running the gen3 healthcheck test * Update healthcheck.sh moving clear_evicted_pods function call to run inside the healthcheck function --- Jenkinsfile | 12 +- gen3/bin/awsrole.sh | 48 ++-- gen3/bin/healthcheck.sh | 6 +- gen3/bin/kube-setup-argo.sh | 2 +- gen3/lib/logs/snapshot.sh | 2 +- gen3/test/ec2Test.sh | 2 +- gen3/test/ecrTest.sh | 4 +- gen3/test/jobTest.sh | 2 +- gen3/test/jupyterTest.sh | 2 +- gen3/test/terraformTest.sh | 461 ------------------------------------ 10 files changed, 39 insertions(+), 502 deletions(-) delete mode 100644 gen3/test/terraformTest.sh diff --git a/Jenkinsfile b/Jenkinsfile index 9c70a2e37..908c2d01a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -134,8 +134,8 @@ spec: readOnly: true mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" - - name: dockersock - mountPath: "/var/run/docker.sock" + - name: containerdsock + mountPath: "/var/run/containerd/containerd.sock" serviceAccount: jenkins-service serviceAccountName: jenkins-service volumes: @@ -145,9 +145,9 @@ spec: - name: ca-volume secret: secretName: "service-ca" - - name: dockersock + - name: containerdsock hostPath: - path: /var/run/docker.sock + path: /var/run/containerd/containerd.sock ''' defaultContainer 'shell' } @@ -293,8 +293,8 @@ spec: script { try { if(!skipUnitTests) { - sh '/usr/bin/pip3 install boto3 --upgrade --user' - sh '/usr/bin/pip3 install kubernetes --upgrade --user' + sh '/usr/local/bin/pip3 
install boto3 --upgrade --user' + sh '/usr/local/bin/pip3 install kubernetes --upgrade --user' sh 'python3 -m pytest cloud-automation/apis_configs/' sh 'python3 -m pytest cloud-automation/gen3/lib/dcf/' sh 'cd cloud-automation/tf_files/aws/modules/common-logging && python3 -m pytest testLambda.py' diff --git a/gen3/bin/awsrole.sh b/gen3/bin/awsrole.sh index 144b7a4fe..dd19ea7a4 100644 --- a/gen3/bin/awsrole.sh +++ b/gen3/bin/awsrole.sh @@ -25,16 +25,16 @@ gen3_awsrole_help() { function gen3_awsrole_ar_policy() { local serviceAccount="$1" shift || return 1 - if [[ ! -z $1 ]]; then - local namespace=$1 + if [[ -z $1 ]] || [[ $1 == -* ]]; then + namespace=$(gen3 db namespace) else - local namespace=$(gen3 db namespace) + namespace=$1 + shift fi local issuer_url local account_id local vpc_name - shift || return 1 - local flag=$1 + local flag=$flag vpc_name="$(gen3 api environment)" || return 1 issuer_url="$(aws eks describe-cluster \ @@ -46,7 +46,7 @@ function gen3_awsrole_ar_policy() { local provider_arn="arn:aws:iam::${account_id}:oidc-provider/${issuer_url}" - if [[ "$flag" == "all_namespaces" ]]; then + if [[ "$flag" == "-all_namespaces" ]]; then # Use a trust policy that allows role to be used by multiple namespaces. cat - < config.tfvars @@ -230,10 +226,14 @@ gen3_awsrole_create() { gen3_log_err "use: gen3 awsrole create roleName saName" return 1 fi - if [[ ! -z $1 ]]; then - local namespace=$1 + if [[ -z $1 ]] || [[ $1 == -* ]]; then + namespace=$(gen3 db namespace) else - local namespace=$(gen3 db namespace) + namespace=$1 + shift + fi + if [[ ! 
-z $1 ]]; then + flag=$1 fi # do simple validation of name local regexp="^[a-z][a-z0-9\-]*$" @@ -247,13 +247,7 @@ EOF gen3_log_err $errMsg return 1 fi - shift || return 1 - local flag="" - # Check if the "all_namespaces" flag is provided - if [[ "$1" == "-f" || "$1" == "--flag" ]]; then - flag="$2" - shift 2 - fi + # check if the name is already used by another entity local entity_type @@ -271,9 +265,11 @@ EOF fi TF_IN_AUTOMATION="true" - if ! _tfplan_role $rolename $saName $namespace -f $flag; then + + if ! _tfplan_role $rolename $saName $namespace $flag; then return 1 fi + if ! _tfapply_role $rolename; then return 1 fi @@ -422,4 +418,4 @@ gen3_awsrole() { # Let testsuite source file if [[ -z "$GEN3_SOURCE_ONLY" ]]; then gen3_awsrole "$@" -fi +fi \ No newline at end of file diff --git a/gen3/bin/healthcheck.sh b/gen3/bin/healthcheck.sh index 149cb1aaa..b658ff033 100644 --- a/gen3/bin/healthcheck.sh +++ b/gen3/bin/healthcheck.sh @@ -137,6 +137,10 @@ gen3_healthcheck() { internetAccessExplicitProxy=false fi + gen3_log_info "Clearing Evicted pods" + sleep 5 + clear_evicted_pods + local healthJson=$(cat - < ". 
g3kubectl get pods -o json | \ - jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | map( {pod: .metadata.name, containers: [(.spec.containers | select(.!=null) | map(.name)), (.spec.initContainers | select(.!=null) | map(.name)) | add ] } ) | map( .pod as $pod | .containers | map( { pod: $pod, cont: .})[]) | map(select(.cont != "pause" and .cont != "jupyterhub"))[] | .pod + " " + .cont' | \ + jq -r '.items | map(select(.status.phase != "Pending" and .status.phase != "Unknown")) | .[] | .metadata.name as $pod | (.spec.containers + .spec.initContainers) | map(select(.name != "pause" and .name != "jupyterhub")) | .[] | {pod: $pod, cont: .name} | "\(.pod) \(.cont)"' | \ while read -r line; do gen3_logs_snapshot_container $line done diff --git a/gen3/test/ec2Test.sh b/gen3/test/ec2Test.sh index 21310a24c..4981c925c 100644 --- a/gen3/test/ec2Test.sh +++ b/gen3/test/ec2Test.sh @@ -1,6 +1,6 @@ -if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[0].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then +if ! EC2_TEST_IP="$(g3kubectl get nodes -o json | jq -r -e '.items[3].status.addresses[] | select(.type == "InternalIP") | .address')" || [[ -z "$EC2_TEST_IP" ]]; then gen3_log_err "ec2Test failed to acquire IP address of a k8s node to test against" fi diff --git a/gen3/test/ecrTest.sh b/gen3/test/ecrTest.sh index 91edf798b..57847abe5 100644 --- a/gen3/test/ecrTest.sh +++ b/gen3/test/ecrTest.sh @@ -10,8 +10,8 @@ test_ecr_login() { test_ecr_setup() { if [[ -n "$JENKINS_HOME" ]]; then - # give ourselves read/write permissions on /var/run/docker.sock - sudo chmod a+rw /var/run/docker.sock; because $? "ecr_setup modified docker.sock" + # give ourselves permissions on /run/containerd/containerd.sock + sudo chown root:sudo /run/containerd/containerd.sock; because $? 
"ecr_setup modified containerd.sock" fi } diff --git a/gen3/test/jobTest.sh b/gen3/test/jobTest.sh index 84a4d046b..bb37b4f72 100644 --- a/gen3/test/jobTest.sh +++ b/gen3/test/jobTest.sh @@ -6,7 +6,7 @@ excludeJob() { local jobKey="$1" local excludeList=( - /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup + /aws-bucket- /bucket- /covid19- /data-ingestion- /google- /nb-etl- /remove-objects-from- /replicate- /s3sync- /fence-cleanup /etl- /indexd- /metadata- ) for exclude in "${excludeList[@]}"; do if [[ "$it" =~ $exclude ]]; then return 0; fi diff --git a/gen3/test/jupyterTest.sh b/gen3/test/jupyterTest.sh index f0e327d71..db6a62618 100644 --- a/gen3/test/jupyterTest.sh +++ b/gen3/test/jupyterTest.sh @@ -30,7 +30,7 @@ test_jupyter_metrics() { } shunit_runtest "test_jupyter_idle" "jupyter" -shunit_runtest "test_jupyter_metrics" "jupyter" +# shunit_runtest "test_jupyter_metrics" "jupyter" shunit_runtest "test_jupyter_prepuller" "local,jupyter" shunit_runtest "test_jupyter_namespace" "local,jupyter" shunit_runtest "test_jupyter_setup" "jupyter" diff --git a/gen3/test/terraformTest.sh b/gen3/test/terraformTest.sh deleted file mode 100644 index 17bcc03c2..000000000 --- a/gen3/test/terraformTest.sh +++ /dev/null @@ -1,461 +0,0 @@ -GEN3_TEST_PROFILE="${GEN3_TEST_PROFILE:-cdistest}" -GEN3_TEST_WORKSPACE="gen3test" -GEN3_TEST_ACCOUNT=707767160287 - -# -# TODO - generalize these tests to setup their own test VPC, -# rather than relying on qaplanetv1 or devplanetv1 being there -# - -# -# Little macos/linux stat wrapper -# -file_mode() { - if [[ $(uname -s) == 'Linux' ]]; then - stat -c %a "$1" - else - stat -f %p "$1" - fi -} - -test_workspace() { - gen3 workon $GEN3_TEST_PROFILE $GEN3_TEST_WORKSPACE; because $? "Calling gen3 workon multiple times should be harmless" - [[ $GEN3_PROFILE = $GEN3_TEST_PROFILE ]]; because $? 
"gen3 workon sets the GEN3_PROFILE env variable: $GEN3_PROFILE" - [[ $GEN3_WORKSPACE = $GEN3_TEST_WORKSPACE ]]; because $? "gen3 workon sets the GEN3_WORKSPACE env variable: $GEN3_WORKSPACE" - [[ $GEN3_FLAVOR = "AWS" || \ - ($GEN3_FLAVOR == "GCP" && $GEN3_PROFILE =~ ^gcp-) || \ - ($GEN3_FLAVOR == "ONPREM" && $GEN3_PROFILE =~ ^onprem-) ]]; because $? "GEN3_FLAVOR is gcp for gcp-* profiles, else AWS" - [[ $GEN3_FLAVOR != "AWS" || $GEN3_S3_BUCKET = "cdis-state-ac${GEN3_TEST_ACCOUNT}-gen3" || $GEN3_S3_BUCKET = "cdis-terraform-state.account-${GEN3_TEST_ACCOUNT}.gen3" ]]; because $? "gen3 workon sets the GEN3_S3_BUCKET env variable: $GEN3_S3_BUCKET" - [[ (! -z $GEN3_WORKDIR) && -d $GEN3_WORKDIR ]]; because $? "gen3 workon sets the GEN3_WORKDIR env variable, and initializes the folder: $GEN3_WORKDIR" - [[ $(file_mode $GEN3_WORKDIR) =~ 700$ ]]; because $? "gen3 workon sets the GEN3_WORKDIR to mode 0700, because secrets are in there" - gen3 cd && [[ $(pwd) = "$GEN3_WORKDIR" ]]; because $? "gen3 cd should take us to the workspace by default: $(pwd) =? $GEN3_WORKDIR" - for fileName in README.md config.tfvars backend.tfvars; do - [[ -f $fileName ]]; because $? "gen3 workon ensures we have a $fileName - local copy || s3 copy || generated from template" - done - [[ ! -z "$MD5" ]]; because $? "commons.sh sets MD5 to $MD5" - - if [[ $GEN3_TEST_WORKSPACE =~ __custom$ ]]; then - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_WORKDIR" ]]; because $? "a __custom workspace loads from the workspace folder" - elif [[ "$GEN3_TEST_PROFILE" =~ ^gcp- ]]; then - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/gcp/commons" ]]; because $? "a gcp- profile currently only support a commons workspace" - elif [[ "$GEN3_TEST_PROFILE" =~ ^onprem- ]]; then - for fileName in README.md creds.json 00configmap.yaml kube-setup.sh; do - filePath="onprem_scripts/$fileName" - [[ -f $filePath ]]; because $? 
"gen3 workon ensures we have a $filePath generated from template" - done - else # aws profile - [[ "$GEN3_TFSCRIPT_FOLDER" =~ ^"$GEN3_HOME/tf_files/aws/" ]]; because $? "an aws workspace references the aws/ folder: $GEN3_TFSCRIPT_FOLDER" - fi -} - -workspace_cleanup() { - # try to avoid accidentally erasing the user's data ... - cd /tmp && [[ -n "$GEN3_WORKDIR" && "$GEN3_WORKDIR" =~ /gen3/ && -f "$GEN3_WORKDIR/config.tfvars" ]] && /bin/rm -rf "$GEN3_WORKDIR"; - because $? "was able to cleanup $GEN3_WORKDIR" -} - -test_uservpc_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_user" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_vpc" ]]; because $? "a _user workspace should use the ./aws/user_vpc resources: $GEN3_TFSCRIPT_FOLDER" - workspace_cleanup -} - -test_usergeneric_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_usergeneric" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/user_generic" ]]; because $? "a _usergeneric workspace should use the ./aws/user_generic resources: $GEN3_TFSCRIPT_FOLDER" - cat << EOF > config.tfvars -username="frickjack" -EOF - gen3 tfplan; because $? "_usergeneric tfplan should work"; - workspace_cleanup -} - -test_snapshot_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_snapshot" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/rds_snapshot" ]]; because $? "a _snapshot workspace should use the ./aws/rds_snapshot resources: $GEN3_TFSCRIPT_FOLDER" - workspace_cleanup -} - -test_databucket_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}_databucket" - test_workspace - [[ "$GEN3_TFSCRIPT_FOLDER" == "$GEN3_HOME/tf_files/aws/data_bucket" ]]; because $? 
"a _databucket workspace should use the ./aws/data_bucket resources: $GEN3_TFSCRIPT_FOLDER" - cat - > config.tfvars < config.tfvars < config.tfvars < @ in password -db_password_fence="whatever" - -db_password_gdcapi="whatever" -db_password_sheepdog="whatever" -db_password_peregrine="whatever" - -db_password_indexd="g6pmYkcoR7qECjGoErzVb5gkX3kum0yo" - -# password for write access to indexd -gdcapi_indexd_password="oYva39mIPV5uXskv7jWnKuVZBUFBQcxd" - -fence_snapshot="" -gdcapi_snapshot="" -indexd_snapshot="" -# mailgun for sending alert e-mails -mailgun_api_key="" -mailgun_api_url="" -mailgun_smtp_host="" - -kube_ssh_key="" -EOM - [[ "$(pwd)" =~ "/$GEN3_WORKSPACE"$ ]]; because $? "commons workspace should have base $GEN3_WORKSPACE - $(pwd)" - gen3 tfplan; because $? "tfplan should run even with some invalid config variables" - [[ -f "$GEN3_WORKDIR/plan.terraform" ]]; because $? "'gen3 tfplan' generates a plan.terraform file used by 'gen3 tfapply'" - workspace_cleanup -} - -test_custom_workspace() { - GEN3_TEST_WORKSPACE="${GEN3_TEST_WORKSPACE}__custom" - test_workspace - - local sourceFolder="../../../../../cloud-automation/tf_files/aws/modules/s3-bucket" - if [[ ! 
-d "$sourceFolder" ]]; then - # Jenkins has a different relative path setup - sourceFolder="../../../../cloud-automation/tf_files/aws/modules/s3-bucket" - fi - cat - > bucket.tf < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < config.tfvars < Date: Tue, 12 Mar 2024 10:05:49 -0500 Subject: [PATCH 310/362] Split build-push workflows (#2499) --- .github/workflows/build_awshelper.yaml | 21 +++++++++++++ .github/workflows/build_python3.10.yaml | 23 ++++++++++++++ .github/workflows/build_python3.9.yaml | 23 ++++++++++++++ .github/workflows/image_build_push.yaml | 42 ------------------------- 4 files changed, 67 insertions(+), 42 deletions(-) create mode 100644 .github/workflows/build_awshelper.yaml create mode 100644 .github/workflows/build_python3.10.yaml create mode 100644 .github/workflows/build_python3.9.yaml delete mode 100644 .github/workflows/image_build_push.yaml diff --git a/.github/workflows/build_awshelper.yaml b/.github/workflows/build_awshelper.yaml new file mode 100644 index 000000000..f42a13610 --- /dev/null +++ b/.github/workflows/build_awshelper.yaml @@ -0,0 +1,21 @@ +name: Build awshelper image + +on: + push: + paths: + - .github/workflows/build_awshelper.yaml + - Docker/awshelper/** + +jobs: + awshelper: + name: Build and push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile" + OVERRIDE_REPO_NAME: "awshelper" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + diff --git a/.github/workflows/build_python3.10.yaml b/.github/workflows/build_python3.10.yaml new file mode 100644 index 000000000..993da1468 --- /dev/null +++ b/.github/workflows/build_python3.10.yaml @@ -0,0 +1,23 @@ +name: Build Python 3.10 image + +on: + 
push: + paths: + - .github/workflows/build_python3.10.yaml + - Docker/python-nginx/python3.10-buster/** + +jobs: + python_3-10: + name: Build and push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster" + OVERRIDE_REPO_NAME: "python" + OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + diff --git a/.github/workflows/build_python3.9.yaml b/.github/workflows/build_python3.9.yaml new file mode 100644 index 000000000..5bc8bc462 --- /dev/null +++ b/.github/workflows/build_python3.9.yaml @@ -0,0 +1,23 @@ +name: Build Python 3.9 image + +on: + push: + paths: + - .github/workflows/build_python3.9.yaml + - Docker/python-nginx/python3.9-buster/** + +jobs: + python_3-9: + name: Build and push + uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master + with: + DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile" + DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster" + OVERRIDE_REPO_NAME: "python" + OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)" + secrets: + ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} + ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} + QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} + QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} + diff --git a/.github/workflows/image_build_push.yaml b/.github/workflows/image_build_push.yaml deleted file mode 100644 index d5bfea351..000000000 --- a/.github/workflows/image_build_push.yaml +++ /dev/null @@ -1,42 +0,0 @@ -name: Build Python Base Images - -on: push - -jobs: - 
python_3-9: - name: Python 3.9 - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.9-buster" - OVERRIDE_REPO_NAME: "python" - OVERRIDE_TAG_NAME: "python3.9-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - python_3-10: - name: Python 3.10 - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile" - DOCKERFILE_BUILD_CONTEXT: "./Docker/python-nginx/python3.10-buster" - OVERRIDE_REPO_NAME: "python" - OVERRIDE_TAG_NAME: "python3.10-buster-$(echo ${GITHUB_REF#refs/*/} | tr / _)" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} - awshelper: - name: AwsHelper - uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master - with: - DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile" - OVERRIDE_REPO_NAME: "awshelper" - secrets: - ECR_AWS_ACCESS_KEY_ID: ${{ secrets.ECR_AWS_ACCESS_KEY_ID }} - ECR_AWS_SECRET_ACCESS_KEY: ${{ secrets.ECR_AWS_SECRET_ACCESS_KEY }} - QUAY_USERNAME: ${{ secrets.QUAY_USERNAME }} - QUAY_ROBOT_TOKEN: ${{ secrets.QUAY_ROBOT_TOKEN }} From b2195b1ae6f9d802d346629d126e9d3ba408eccd Mon Sep 17 00:00:00 2001 From: Pauline <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 12 Mar 2024 10:07:34 -0500 Subject: [PATCH 311/362] fix wf naming --- .github/workflows/build_awshelper.yaml | 2 +- .github/workflows/build_python3.10.yaml | 2 +- 
.github/workflows/build_python3.9.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build_awshelper.yaml b/.github/workflows/build_awshelper.yaml index f42a13610..3d2da5393 100644 --- a/.github/workflows/build_awshelper.yaml +++ b/.github/workflows/build_awshelper.yaml @@ -8,7 +8,7 @@ on: jobs: awshelper: - name: Build and push + name: awshelper uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/awshelper/Dockerfile" diff --git a/.github/workflows/build_python3.10.yaml b/.github/workflows/build_python3.10.yaml index 993da1468..80d2d7623 100644 --- a/.github/workflows/build_python3.10.yaml +++ b/.github/workflows/build_python3.10.yaml @@ -8,7 +8,7 @@ on: jobs: python_3-10: - name: Build and push + name: Python 3.10 uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.10-buster/Dockerfile" diff --git a/.github/workflows/build_python3.9.yaml b/.github/workflows/build_python3.9.yaml index 5bc8bc462..540e0d4ec 100644 --- a/.github/workflows/build_python3.9.yaml +++ b/.github/workflows/build_python3.9.yaml @@ -8,7 +8,7 @@ on: jobs: python_3-9: - name: Build and push + name: Python 3.9 uses: uc-cdis/.github/.github/workflows/image_build_push.yaml@master with: DOCKERFILE_LOCATION: "./Docker/python-nginx/python3.9-buster/Dockerfile" From b6031e029db84ab0190d2a263c16b418b113482d Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 12 Mar 2024 12:03:41 -0500 Subject: [PATCH 312/362] Build awshelper workflow: always build (#2501) --- .github/workflows/build_awshelper.yaml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build_awshelper.yaml b/.github/workflows/build_awshelper.yaml index 3d2da5393..36b5745db 100644 --- a/.github/workflows/build_awshelper.yaml +++ b/.github/workflows/build_awshelper.yaml @@ -1,10 
+1,8 @@ name: Build awshelper image -on: - push: - paths: - - .github/workflows/build_awshelper.yaml - - Docker/awshelper/** +# Always build this image because it contains all the cloud-automation files. +# Some jobs depend on arbitrary files and we need to test them with updated awshelper images. +on: push jobs: awshelper: From 24492c2d6868ce49a474617544b575a38697d0af Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Tue, 12 Mar 2024 16:23:59 -0400 Subject: [PATCH 313/362] Adding a gen3 db namespace to the temp files so they don't overlap (#2502) --- gen3/bin/kube-setup-revproxy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-revproxy.sh b/gen3/bin/kube-setup-revproxy.sh index 5db9850a1..fd30b478b 100644 --- a/gen3/bin/kube-setup-revproxy.sh +++ b/gen3/bin/kube-setup-revproxy.sh @@ -114,8 +114,8 @@ done if g3k_manifest_lookup .argo.argo_server_service_url 2> /dev/null; then argo_server_service_url=$(g3k_manifest_lookup .argo.argo_server_service_url) - g3k_kv_filter "${scriptDir}/gen3.nginx.conf/argo-server.conf" SERVICE_URL "${argo_server_service_url}" > /tmp/argo-server-with-url.conf - filePath="/tmp/argo-server-with-url.conf" + g3k_kv_filter "${scriptDir}/gen3.nginx.conf/argo-server.conf" SERVICE_URL "${argo_server_service_url}" > /tmp/argo-server-with-url$(gen3 db namespace).conf + filePath="/tmp/argo-server-with-url$(gen3 db namespace).conf" if [[ -f "$filePath" ]]; then confFileList+=("--from-file" "$filePath") fi From e979669cd92cf1ecad69f2bc2837a8fed35e2926 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Wed, 13 Mar 2024 12:08:30 -0500 Subject: [PATCH 314/362] HP-1310 feat: updated related studies logic (#2498) * feat: updated related studies logic * update --- .../healdata/heal-cedar-data-ingest.py | 174 +++++++++++------- 1 file changed, 107 insertions(+), 67 deletions(-) diff --git 
a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index c54f9d5aa..e0c4b3c46 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -13,7 +13,7 @@ "study_metadata.study_type.study_subject_type": "Subject Type", "study_metadata.human_subject_applicability.gender_applicability": "Gender", "study_metadata.human_subject_applicability.age_applicability": "Age", - "research_program": "Research Program" + "research_program": "Research Program", } # Defines how to handle special cases for values in filters @@ -33,7 +33,7 @@ "Gender Queer": "Genderqueer/gender nonconforming/neither exclusively male nor female", "Intersex": "Genderqueer/gender nonconforming/neither exclusively male nor female", "Intersexed": "Genderqueer/gender nonconforming/neither exclusively male nor female", - "Buisness Development": "Business Development" + "Buisness Development": "Business Development", } # Defines field that we don't want to include in the filters @@ -54,24 +54,25 @@ def is_valid_uuid(uuid_to_test, version=4): """ Check if uuid_to_test is a valid UUID. - + Parameters ---------- uuid_to_test : str version : {1, 2, 3, 4} - + Returns ------- `True` if uuid_to_test is a valid UUID, otherwise `False`. 
- + """ - + try: uuid_obj = UUID(uuid_to_test, version=version) except ValueError: return False return str(uuid_obj) == uuid_to_test + def update_filter_metadata(metadata_to_update): filter_metadata = [] for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items(): @@ -83,20 +84,21 @@ def update_filter_metadata(metadata_to_update): print(filter_field_values) raise TypeError("Neither a string nor a list") for filter_field_value in filter_field_values: - if (metadata_field_key, filter_field_value) in OMITTED_VALUES_MAPPING.items(): + if ( + metadata_field_key, + filter_field_value, + ) in OMITTED_VALUES_MAPPING.items(): continue if filter_field_value in SPECIAL_VALUE_MAPPINGS: filter_field_value = SPECIAL_VALUE_MAPPINGS[filter_field_value] - filter_metadata.append({"key": filter_field_key, "value": filter_field_value}) + filter_metadata.append( + {"key": filter_field_key, "value": filter_field_value} + ) filter_metadata = pydash.uniq(filter_metadata) metadata_to_update["advSearchFilters"] = filter_metadata # Retain these from existing tags save_tags = ["Data Repository"] - tags = [ - tag - for tag in metadata_to_update["tags"] - if tag["category"] in save_tags - ] + tags = [tag for tag in metadata_to_update["tags"] if tag["category"] in save_tags] # Add any new tags from advSearchFilters for f in metadata_to_update["advSearchFilters"]: if f["key"] == "Gender": @@ -111,25 +113,30 @@ def update_filter_metadata(metadata_to_update): def get_client_token(client_id: str, client_secret: str): try: token_url = f"http://revproxy-service/user/oauth2/token" - headers = {'Content-Type': 'application/x-www-form-urlencoded'} - params = {'grant_type': 'client_credentials'} - data = 'scope=openid user data' + headers = {"Content-Type": "application/x-www-form-urlencoded"} + params = {"grant_type": "client_credentials"} + data = "scope=openid user data" token_result = requests.post( - token_url, params=params, headers=headers, data=data, + token_url, + params=params, + 
headers=headers, + data=data, auth=(client_id, client_secret), ) - token = token_result.json()["access_token"] + token = token_result.json()["access_token"] except: raise Exception("Could not get token") return token -def get_related_studies(serial_num, hostname): +def get_related_studies(serial_num, guid, hostname): related_study_result = [] if serial_num: - mds = requests.get(f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000") + mds = requests.get( + f"http://revproxy-service/mds/metadata?nih_reporter.project_num_split.serial_num={serial_num}&data=true&limit=2000" + ) if mds.status_code == 200: related_study_metadata = mds.json() @@ -137,15 +144,22 @@ def get_related_studies(serial_num, hostname): related_study_metadata_key, related_study_metadata_value, ) in related_study_metadata.items(): + if related_study_metadata_key == guid or ( + related_study_metadata_value["_guid_type"] != "discovery_metadata" + and related_study_metadata_value["_guid_type"] + != "unregistered_discovery_metadata" + ): + # do nothing for self, or for archived studies + continue title = ( - related_study_metadata_value.get( - "gen3_discovery", {} - ) + related_study_metadata_value.get("gen3_discovery", {}) .get("study_metadata", {}) .get("minimal_info", {}) .get("study_name", "") ) - link = f"https://{hostname}/portal/discovery/{related_study_metadata_key}/" + link = ( + f"https://{hostname}/portal/discovery/{related_study_metadata_key}/" + ) related_study_result.append({"title": title, "link": link}) return related_study_result @@ -180,7 +194,7 @@ def get_related_studies(serial_num, hostname): print("Getting CEDAR client access token") access_token = get_client_token(client_id, client_secret) -token_header = {"Authorization": 'bearer ' + access_token} +token_header = {"Authorization": "bearer " + access_token} limit = 10 offset = 0 @@ -192,16 +206,21 @@ def get_related_studies(serial_num, hostname): print("Directory ID is 
not in UUID format!") sys.exit(1) -while((limit + offset <= total)): +while limit + offset <= total: # Get the metadata from cedar to register print("Querying CEDAR...") - cedar = requests.get(f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", headers=token_header) + cedar = requests.get( + f"http://revproxy-service/cedar/get-instance-by-directory/{dir_id}?limit={limit}&offset={offset}", + headers=token_header, + ) # If we get metadata back now register with MDS if cedar.status_code == 200: metadata_return = cedar.json() if "metadata" not in metadata_return: - print("Got 200 from CEDAR wrapper but no metadata in body, something is not right!") + print( + "Got 200 from CEDAR wrapper but no metadata in body, something is not right!" + ) sys.exit(1) total = metadata_return["metadata"]["totalCount"] @@ -209,13 +228,17 @@ def get_related_studies(serial_num, hostname): print(f"Successfully got {returned_records} record(s) from CEDAR directory") for cedar_record in metadata_return["metadata"]["records"]: # get the appl id from cedar for querying in our MDS - cedar_appl_id = pydash.get(cedar_record, "metadata_location.nih_application_id") + cedar_appl_id = pydash.get( + cedar_record, "metadata_location.nih_application_id" + ) if cedar_appl_id is None: print("This record doesn't have appl_id, skipping...") continue # Get the metadata record for the nih_application_id - mds = requests.get(f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.nih_application_id={cedar_appl_id}&data=true") + mds = requests.get( + f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.nih_application_id={cedar_appl_id}&data=true" + ) if mds.status_code == 200: mds_res = mds.json() @@ -234,9 +257,13 @@ def get_related_studies(serial_num, hostname): if mds_res["_guid_type"] == "discovery_metadata": print("Metadata is already registered. 
Updating MDS record") elif mds_res["_guid_type"] == "unregistered_discovery_metadata": - print("Metadata has not been registered. Registering it in MDS record") + print( + "Metadata has not been registered. Registering it in MDS record" + ) else: - print(f"This metadata data record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped") + print( + f"This metadata data record has a special GUID type \"{mds_res['_guid_type']}\" and will be skipped" + ) continue if "clinicaltrials_gov" in cedar_record: @@ -244,21 +271,27 @@ def get_related_studies(serial_num, hostname): del cedar_record["clinicaltrials_gov"] # some special handing for this field, because its parent will be deleted before we merging the CEDAR and MDS SLMD to avoid duplicated values - cedar_record_other_study_websites = cedar_record.get("metadata_location", {}).get("other_study_websites", []) + cedar_record_other_study_websites = cedar_record.get( + "metadata_location", {} + ).get("other_study_websites", []) del cedar_record["metadata_location"] mds_res["gen3_discovery"]["study_metadata"].update(cedar_record) - mds_res["gen3_discovery"]["study_metadata"]["metadata_location"]["other_study_websites"] = cedar_record_other_study_websites + mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][ + "other_study_websites" + ] = cedar_record_other_study_websites # setup citations - doi_citation = mds_res["gen3_discovery"]["study_metadata"].get("doi_citation", "") - mds_res["gen3_discovery"]["study_metadata"]["citation"]["heal_platform_citation"] = doi_citation - + doi_citation = mds_res["gen3_discovery"]["study_metadata"].get( + "doi_citation", "" + ) + mds_res["gen3_discovery"]["study_metadata"]["citation"][ + "heal_platform_citation" + ] = doi_citation # setup repository_study_link data_repositories = ( - mds_res - .get("gen3_discovery", {}) + mds_res.get("gen3_discovery", {}) .get("study_metadata", {}) .get("metadata_location", {}) .get("data_repositories", []) @@ -275,8 +308,13 
@@ def get_related_studies(serial_num, hostname): repository_study_link = REPOSITORY_STUDY_ID_LINK_TEMPLATE[ repository["repository_name"] ].replace("", repository["repository_study_ID"]) - repository.update({"repository_study_link": repository_study_link}) - if repository_citation_additional_text not in repository_citation: + repository.update( + {"repository_study_link": repository_study_link} + ) + if ( + repository_citation_additional_text + not in repository_citation + ): repository_citation += repository_citation_additional_text if len(data_repositories): data_repositories[0] = { @@ -284,36 +322,28 @@ def get_related_studies(serial_num, hostname): "repository_citation": repository_citation, } - mds_res["gen3_discovery"]["study_metadata"][ - "metadata_location" - ]["data_repositories"] = copy.deepcopy(data_repositories) - - + mds_res["gen3_discovery"]["study_metadata"]["metadata_location"][ + "data_repositories" + ] = copy.deepcopy(data_repositories) # set up related studies serial_num = None try: serial_num = ( - mds_res - .get("nih_reporter", {}) + mds_res.get("nih_reporter", {}) .get("project_num_split", {}) .get("serial_num", None) ) except Exception: - print(f"Unable to get serial number for study") - - if serial_num == None: - print(f"Unable to get serial number for study") + print("Unable to get serial number for study") - related_study_result = get_related_studies(serial_num, hostname) - existing_related_study_result = mds_res.get("related_studies", []) - for related_study in related_study_result: - if related_study not in existing_related_study_result: - existing_related_study_result.append(copy.deepcopy(related_study)) - mds_res["gen3_discovery"][ - "related_studies" - ] = copy.deepcopy(existing_related_study_result) + if serial_num is None: + print("Unable to get serial number for study") + related_study_result = get_related_studies( + serial_num, mds_record_guid, hostname + ) + mds_res["gen3_discovery"]["related_studies"] = 
copy.deepcopy(related_study_result) # merge data from cedar that is not study level metadata into a level higher deleted_keys = [] @@ -324,29 +354,39 @@ def get_related_studies(serial_num, hostname): for key in deleted_keys: del mds_res["gen3_discovery"]["study_metadata"][key] - mds_discovery_data_body = update_filter_metadata(mds_res["gen3_discovery"]) + mds_discovery_data_body = update_filter_metadata( + mds_res["gen3_discovery"] + ) mds_cedar_register_data_body["gen3_discovery"] = mds_discovery_data_body if mds_clinical_trials: - mds_cedar_register_data_body["clinicaltrials_gov"] = {**mds_cedar_register_data_body.get("clinicaltrials_gov", {}), **mds_clinical_trials} + mds_cedar_register_data_body["clinicaltrials_gov"] = { + **mds_cedar_register_data_body.get("clinicaltrials_gov", {}), + **mds_clinical_trials, + } mds_cedar_register_data_body["_guid_type"] = "discovery_metadata" print(f"Metadata {mds_record_guid} is now being registered.") - mds_put = requests.put(f"http://revproxy-service/mds/metadata/{mds_record_guid}", + mds_put = requests.put( + f"http://revproxy-service/mds/metadata/{mds_record_guid}", headers=token_header, - json = mds_cedar_register_data_body + json=mds_cedar_register_data_body, ) if mds_put.status_code == 200: print(f"Successfully registered: {mds_record_guid}") else: - print(f"Failed to register: {mds_record_guid}. Might not be MDS admin") + print( + f"Failed to register: {mds_record_guid}. 
Might not be MDS admin" + ) print(f"Status from MDS: {mds_put.status_code}") else: print(f"Failed to get information from MDS: {mds.status_code}") - + else: - print(f"Failed to get information from CEDAR wrapper service: {cedar.status_code}") + print( + f"Failed to get information from CEDAR wrapper service: {cedar.status_code}" + ) if offset + limit == total: break From ec6510ff37b03662497ac5e651b36d70f4101e68 Mon Sep 17 00:00:00 2001 From: Jian <52763034+tianj7@users.noreply.github.com> Date: Wed, 13 Mar 2024 16:30:06 -0500 Subject: [PATCH 315/362] add alt text to maintenance page images (#2500) --- files/dashboard/maintenance-page/index.html | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/files/dashboard/maintenance-page/index.html b/files/dashboard/maintenance-page/index.html index a3e34479b..fac49e64e 100644 --- a/files/dashboard/maintenance-page/index.html +++ b/files/dashboard/maintenance-page/index.html @@ -16,7 +16,7 @@ @@ -27,12 +27,12 @@

This site is under maintenance...

Please check back later.

- + A shiba dog looking into the distance From 5b75af3a39cc8ebcfeed9c75a4fe55f0db004ae1 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 18 Mar 2024 10:41:44 -0600 Subject: [PATCH 316/362] Update ingress.yaml (#2506) --- kube/services/ingress/ingress.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/ingress/ingress.yaml b/kube/services/ingress/ingress.yaml index 3f1f31259..1db08e8ef 100644 --- a/kube/services/ingress/ingress.yaml +++ b/kube/services/ingress/ingress.yaml @@ -11,7 +11,7 @@ metadata: alb.ingress.kubernetes.io/listen-ports: '[{"HTTP": 80}, {"HTTPS":443}]' alb.ingress.kubernetes.io/load-balancer-attributes: idle_timeout.timeout_seconds=600 alb.ingress.kubernetes.io/actions.ssl-redirect: '{"Type": "redirect", "RedirectConfig": { "Protocol": "HTTPS", "Port": "443", "StatusCode": "HTTP_301"}}' - alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-Res-FIPS-2023-04 + alb.ingress.kubernetes.io/ssl-policy: ELBSecurityPolicy-TLS13-1-2-FIPS-2023-04 spec: ingressClassName: alb rules: From bff3a57818d24f416a3a518bebfe956e281bad80 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Tue, 19 Mar 2024 09:53:47 -0500 Subject: [PATCH 317/362] MIDRC-543 Let Hatchery assume role (#2504) --- gen3/bin/kube-setup-hatchery.sh | 31 ++++++++++++++++++++++++-- kube/services/jobs/ecr-access-job.yaml | 2 -- 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index 5454d1e24..bdcff8ed0 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -76,15 +76,38 @@ else exists_or_create_gen3_license_table "$TARGET_TABLE" fi +# if `nextflow-global.imagebuilder-reader-role-arn` is set in hatchery config, allow hatchery +# to assume the configured role +imagebuilderRoleArn=$(g3kubectl get configmap manifest-hatchery -o jsonpath={.data.nextflow-global} | jq -r 
'."imagebuilder-reader-role-arn"') +assumeImageBuilderRolePolicyBlock="" +if [ -z "$imagebuilderRoleArn" ]; then + gen3_log_info "No 'nexftlow-global.imagebuilder-reader-role-arn' in Hatchery configuration, not granting AssumeRole" +else + gen3_log_info "Found 'nexftlow-global.imagebuilder-reader-role-arn' in Hatchery configuration, granting AssumeRole" + assumeImageBuilderRolePolicyBlock=$( cat < /dev/null 2>&1; then roleName="$(gen3 api safe-name hatchery-sa)" gen3 awsrole create $roleName $saName @@ -176,7 +204,6 @@ if ! g3kubectl get sa "$saName" -o json | jq -e '.metadata.annotations | ."eks.a # create the new version gen3_aws_run aws iam create-policy-version --policy-arn "$policyArn" --policy-document "$policy" --set-as-default fi - gen3_log_info "Attaching policy '${policyName}' to role '${roleName}'" gen3 awsrole attach-policy ${policyArn} --role-name ${roleName} --force-aws-cli || exit 1 gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 diff --git a/kube/services/jobs/ecr-access-job.yaml b/kube/services/jobs/ecr-access-job.yaml index 11979a123..89bb49d6d 100644 --- a/kube/services/jobs/ecr-access-job.yaml +++ b/kube/services/jobs/ecr-access-job.yaml @@ -65,8 +65,6 @@ spec: args: - "-c" - | - set -e - cd cloud-automation/files/scripts/ echo Installing requirements... pip3 install -r ecr-access-job-requirements.txt From 6d67d747679ed73edf55fc3484297732f59e4000 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 20 Mar 2024 10:10:41 -0400 Subject: [PATCH 318/362] Feat/scaling va workflows (#2507) * Raising total parallelism to 13, to enable stress tests in va-testing. 
This should be merged with a value of 10, to allow 5 WFs in each env * Bumping the parallelism for Argo up to 5 --- kube/services/argo/values.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kube/services/argo/values.yaml b/kube/services/argo/values.yaml index 2b46ced0f..23dda4a5a 100644 --- a/kube/services/argo/values.yaml +++ b/kube/services/argo/values.yaml @@ -1,6 +1,6 @@ controller: - parallelism: 8 - namespaceParallelism: 3 + parallelism: 10 + namespaceParallelism: 5 metricsConfig: # -- Enables prometheus metrics server enabled: true From 31e6e49015d84782e1c5687175dc11e36bfae4f8 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 20 Mar 2024 15:41:29 -0400 Subject: [PATCH 319/362] Creating a new type of DB dump that grabs stuff for va-testing (#2508) * Creating a new type of DB dump that grabs stuff for va-testing * Missed changes to dbdump script * Changing job name --- gen3/bin/dbbackup.sh | 13 ++- .../jobs/psql-db-dump-va-testing-job.yaml | 80 +++++++++++++++++++ 2 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 kube/services/jobs/psql-db-dump-va-testing-job.yaml diff --git a/gen3/bin/dbbackup.sh b/gen3/bin/dbbackup.sh index 29f267221..eb9611a90 100644 --- a/gen3/bin/dbbackup.sh +++ b/gen3/bin/dbbackup.sh @@ -173,6 +173,10 @@ db_restore() { gen3 job run psql-db-prep-restore } +va_testing_db_dump() { + gen3 job run psql-db-dump-va-testing +} + # main function to determine whether dump or restore main() { @@ -191,8 +195,15 @@ main() { create_s3_bucket db_restore ;; + va-dump) + gen3_log_info "Running a va-testing DB dump..." + create_policy + create_service_account_and_role + create_s3_bucket + va_testing_db_dump + ;; *) - echo "Invalid command. Usage: gen3 dbbackup [dump|restore]" + echo "Invalid command. 
Usage: gen3 dbbackup [dump|restore|va-dump]" return 1 ;; esac diff --git a/kube/services/jobs/psql-db-dump-va-testing-job.yaml b/kube/services/jobs/psql-db-dump-va-testing-job.yaml new file mode 100644 index 000000000..8a8037e16 --- /dev/null +++ b/kube/services/jobs/psql-db-dump-va-testing-job.yaml @@ -0,0 +1,80 @@ +--- +# NOTE: This job was created specifically to dump all the databases in va-testing, in preparation for a move to second cluster +# If you aren't doing that, this probably is not the job you're looking for +apiVersion: batch/v1 +kind: Job +metadata: + name: psql-db-dump-va-testing +spec: + template: + metadata: + labels: + app: gen3job + spec: + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + preference: + matchExpressions: + - key: karpenter.sh/capacity-type + operator: In + values: + - on-demand + - weight: 99 + preference: + matchExpressions: + - key: eks.amazonaws.com/capacityType + operator: In + values: + - ONDEMAND + serviceAccountName: dbbackup-sa + containers: + - name: pgdump + image: quay.io/cdis/awshelper:master + imagePullPolicy: Always + env: + - name: gen3Env + valueFrom: + configMapKeyRef: + name: global + key: environment + - name: JENKINS_HOME + value: "devterm" + - name: GEN3_HOME + value: /home/ubuntu/cloud-automation + command: ["/bin/bash"] + args: + - "-c" + - | + source "${GEN3_HOME}/gen3/lib/utils.sh" + gen3_load "gen3/gen3setup" + account_id=$(aws sts get-caller-identity --query "Account" --output text) + default_bucket_name="gen3-db-backups-${account_id}" + default_databases=("fence" "indexd" "sheepdog" "peregrine" "arborist" "argo" "atlas" "metadata" "ohdsi" "omop-data" "wts") + s3_dir="va-testing-$(date +"%Y-%m-%d-%H-%M-%S")" + databases=("${default_databases[@]}") + bucket_name=$default_bucket_name + + for database in "${databases[@]}"; do + gen3_log_info "Starting database backup for ${database}" + gen3 db backup "${database}" > "${database}.sql" + + if [ $? 
-eq 0 ] && [ -f "${database}.sql" ]; then + gen3_log_info "Uploading backup file ${database}.sql to s3://${bucket_name}/${s3_dir}/${database}.sql" + aws s3 cp "${database}.sql" "s3://${bucket_name}/${s3_dir}/${database}.sql" + + if [ $? -eq 0 ]; then + gen3_log_info "Successfully uploaded ${database}.sql to S3" + else + gen3_log_err "Failed to upload ${database}.sql to S3" + fi + gen3_log_info "Deleting temporary backup file ${database}.sql" + rm -f "${database}.sql" + else + gen3_log_err "Backup operation failed for ${database}" + rm -f "${database}.sql" + fi + done + sleep 600 + restartPolicy: Never From 6de65e70a7065789f6250ad05e94f816bf8eeeaf Mon Sep 17 00:00:00 2001 From: Michael Lukowski Date: Wed, 20 Mar 2024 15:32:38 -0500 Subject: [PATCH 320/362] add whitelist for qdr staging (#2509) --- files/squid_whitelist/web_whitelist | 2 ++ 1 file changed, 2 insertions(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index c191b2e8c..afacba9e4 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -165,3 +165,5 @@ www.rabbitmq.com www.uniprot.org vpodc.org yahoo.com +idp.stage.qdr.org +stage.qdr.org \ No newline at end of file From cde8a9666a53fe9c2345f4562f202a701e9a172a Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 25 Mar 2024 11:26:58 -0400 Subject: [PATCH 321/362] Cronjobs aren't beta (#2511) --- .../services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml index 74d7fc9a4..93eaf7652 100644 --- a/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml +++ b/kube/services/jobs/fence-cleanup-expired-ga4gh-info-cronjob.yaml @@ -1,5 +1,5 @@ --- -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: 
fence-cleanup-expired-ga4gh-info From 79f305a7741a221a6a6e0236c08e0f610cc589f0 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:13:38 -0500 Subject: [PATCH 322/362] MIDRC-672 Fix ECR access job role name conflict (#2515) --- gen3/bin/iam-serviceaccount.sh | 25 +++++++++++++++-------- gen3/bin/kube-setup-ecr-access-cronjob.sh | 4 ++-- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/gen3/bin/iam-serviceaccount.sh b/gen3/bin/iam-serviceaccount.sh index 0c5a8bba3..1ea055f66 100644 --- a/gen3/bin/iam-serviceaccount.sh +++ b/gen3/bin/iam-serviceaccount.sh @@ -115,7 +115,7 @@ EOF # @return the resulting json from awscli ## function create_role(){ - local role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role" + local role_name="${1}" if [[ ${#role_name} -gt 63 ]]; then role_name=$(echo "$role_name" | head -c63) gen3_log_warning "Role name has been truncated, due to amazon role name 64 character limit. New role name is $role_name" @@ -123,8 +123,8 @@ function create_role(){ local assume_role_policy_path="$(create_assume_role_policy)" gen3_log_info "Entering create_role" - gen3_log_info " ${role_name}" - gen3_log_info " ${assume_role_policy_path}" + gen3_log_info " Role: ${role_name}" + gen3_log_info " Policy path: ${assume_role_policy_path}" local role_json role_json=$(aws iam create-role \ @@ -156,8 +156,8 @@ function add_policy_to_role(){ local role_name="${2}" gen3_log_info "Entering add_policy_to_role" - gen3_log_info " ${policy}" - gen3_log_info " ${role_name}" + gen3_log_info " Policy: ${policy}" + gen3_log_info " Role: ${role_name}" local result if [[ ${policy} =~ arn:aws:iam::aws:policy/[a-zA-Z0-9]+ ]] @@ -198,8 +198,8 @@ function create_role_with_policy() { local role_name="${2}" gen3_log_info "Entering create_role_with_policy" - gen3_log_info " ${policy}" - gen3_log_info " ${role_name}" + gen3_log_info " Policy: ${policy}" + gen3_log_info " Role: ${role_name}" local 
created_role_json created_role_json="$(create_role ${role_name})" || return $? @@ -357,7 +357,10 @@ function main() { local policy_validation local policy_source - local role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role" + local role_name=$ROLE_NAME + if [ -z "${role_name}" ]; then + role_name="${vpc_name}-${SERVICE_ACCOUNT_NAME}-role" + fi if [ -z ${NAMESPACE_SCRIPT} ]; then @@ -481,6 +484,12 @@ while getopts "$OPTSPEC" optchar; do ACTION="c" SERVICE_ACCOUNT_NAME=${OPTARG#*=} ;; + role-name) + ROLE_NAME="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) + ;; + role-name=*) + ROLE_NAME=${OPTARG#*=} + ;; list) ACTION="l" SERVICE_ACCOUNT_NAME="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) diff --git a/gen3/bin/kube-setup-ecr-access-cronjob.sh b/gen3/bin/kube-setup-ecr-access-cronjob.sh index d23afc862..5c645ad35 100644 --- a/gen3/bin/kube-setup-ecr-access-cronjob.sh +++ b/gen3/bin/kube-setup-ecr-access-cronjob.sh @@ -38,8 +38,8 @@ setup_ecr_access_job() { ] } EOM - local role_name - if ! role_name="$(gen3 iam-serviceaccount -c "${saName}" -p $tempFile)" || [[ -z "$role_name" ]]; then + local safe_role_name=$(gen3 api safe-name ${saName}-role | head -c63) + if ! 
role_name="$(gen3 iam-serviceaccount -c "${saName}" -p $tempFile --role-name $safe_role_name)" || [[ -z "$role_name" ]]; then gen3_log_err "Failed to create iam service account" rm $tempFile return 1 From 797fdf3fcd4f2ce8d66582a6e7891bfbba5bffe1 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Fri, 29 Mar 2024 11:59:34 -0400 Subject: [PATCH 323/362] Adding awslabs.github.io to the squid whitelist (#2516) --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index afacba9e4..6896314ab 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -7,6 +7,7 @@ achecker.ca apache.github.io api.epigraphdb.org api.monqcle.com +awslabs.github.io biodata-integration-tests.net marketing.biorender.com clinicaltrials.gov From 07813b6fff305398b90c6dcdc810e253fb41b086 Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Thu, 4 Apr 2024 10:37:08 -0500 Subject: [PATCH 324/362] Fail if fence-create client fails in kube-setup-ohdsi (#2514) Co-authored-by: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> --- gen3/bin/kube-setup-ohdsi.sh | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/gen3/bin/kube-setup-ohdsi.sh b/gen3/bin/kube-setup-ohdsi.sh index 14b35a714..3d8165547 100644 --- a/gen3/bin/kube-setup-ohdsi.sh +++ b/gen3/bin/kube-setup-ohdsi.sh @@ -14,13 +14,8 @@ new_client() { local secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1) # secrets looks like ('CLIENT_ID', 'CLIENT_SECRET') if [[ ! 
$secrets =~ (\'(.*)\', \'(.*)\') ]]; then - # try delete client - g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-delete --client atlas > /dev/null 2>&1 - secrets=$(g3kubectl exec -c fence $(gen3 pod fence) -- fence-create client-create --client atlas --urls https://${atlas_hostname}/WebAPI/user/oauth/callback?client_name=OidcClient --username atlas --allowed-scopes openid profile email user | tail -1) - if [[ ! $secrets =~ (\'(.*)\', \'(.*)\') ]]; then - gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets" - return 1 - fi + gen3_log_err "kube-setup-ohdsi" "Failed generating oidc client for atlas: $secrets" + return 1 fi local FENCE_CLIENT_ID="${BASH_REMATCH[2]}" local FENCE_CLIENT_SECRET="${BASH_REMATCH[3]}" From 775d224e4ffc301e04a78c7878b499caf05d1f0f Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Thu, 4 Apr 2024 13:05:45 -0500 Subject: [PATCH 325/362] Update ecr.sh (#2518) --- gen3/bin/ecr.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index 930202a87..f3f13b993 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -32,6 +32,7 @@ accountList=( 205252583234 885078588865 922467707295 +533267425233 ) principalStr="" From 9c2f09eefbcdc1cc29286481cb0bafac170a4bf1 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Thu, 4 Apr 2024 13:16:43 -0500 Subject: [PATCH 326/362] Update ecr.sh (#2519) --- gen3/bin/ecr.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/gen3/bin/ecr.sh b/gen3/bin/ecr.sh index f3f13b993..36af791ef 100644 --- a/gen3/bin/ecr.sh +++ b/gen3/bin/ecr.sh @@ -33,6 +33,7 @@ accountList=( 885078588865 922467707295 533267425233 +048463324059 ) principalStr="" From 147ea5e0086a0753536a0f2f027471544b638bea Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 15 Apr 2024 06:14:13 -0600 Subject: [PATCH 327/362] fix(sqs-helper): Updated sqs helper script to create more than one workspace GPE-998 (#2295) * fix(sqs-helper): Updated sqs helper script to create more than one workspace * fix(sqs-helper): Updated scripts calling sqs helper to give simpler sqs names * fix(sqs-helper): Updated sqs variables to be consistent --------- Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-audit-service.sh | 2 +- gen3/bin/kube-setup-fence.sh | 2 +- gen3/bin/kube-setup-karpenter.sh | 6 +++--- gen3/bin/sqs.sh | 15 ++++++++------- 4 files changed, 13 insertions(+), 12 deletions(-) diff --git a/gen3/bin/kube-setup-audit-service.sh b/gen3/bin/kube-setup-audit-service.sh index b7565194c..92c70f352 100644 --- a/gen3/bin/kube-setup-audit-service.sh +++ b/gen3/bin/kube-setup-audit-service.sh @@ -65,7 +65,7 @@ EOM } setup_audit_sqs() { - local sqsName="$(gen3 api safe-name audit-sqs)" + local sqsName="audit-sqs" sqsInfo="$(gen3 sqs create-queue-if-not-exist $sqsName)" || exit 1 sqsUrl="$(jq -e -r '.["url"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; } sqsArn="$(jq -e -r '.["arn"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; } diff --git a/gen3/bin/kube-setup-fence.sh b/gen3/bin/kube-setup-fence.sh index 03edabbf4..cc0516c93 100644 --- a/gen3/bin/kube-setup-fence.sh +++ b/gen3/bin/kube-setup-fence.sh @@ -9,7 +9,7 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" 
gen3_load "gen3/lib/kube-setup-init" setup_audit_sqs() { - local sqsName="$(gen3 api safe-name audit-sqs)" + local sqsName="audit-sqs" sqsInfo="$(gen3 sqs create-queue-if-not-exist $sqsName)" || exit 1 sqsUrl="$(jq -e -r '.["url"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; } sqsArn="$(jq -e -r '.["arn"]' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; } diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 2737ed6ee..949c1ccd1 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -31,7 +31,7 @@ gen3_deploy_karpenter() { else karpenter=${karpenter:-v0.22.0} fi - local queue_name="karpenter-sqs-${vpc_name}" + local queue_name="$(gen3 api safe-name karpenter-sqs)" echo '{ "Statement": [ { @@ -202,9 +202,9 @@ gen3_update_karpenter_configs() { } gen3_create_karpenter_sqs_eventbridge() { - local queue_name="karpenter-sqs-${vpc_name}" + local queue_name="$(gen3 api safe-name karpenter-sqs)" local eventbridge_rule_name="karpenter-eventbridge-${vpc_name}" - #gen3 sqs create-queue-if-not-exist $queue_name >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" + gen3 sqs create-queue-if-not-exist karpenter-sqs >> "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" local queue_url=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.url') local queue_arn=$(cat "$XDG_RUNTIME_DIR/sqs-${vpc_name}.json" | jq -r '.arn') # Create eventbridge rules diff --git a/gen3/bin/sqs.sh b/gen3/bin/sqs.sh index dccb1ff7b..7448437a0 100644 --- a/gen3/bin/sqs.sh +++ b/gen3/bin/sqs.sh @@ -50,15 +50,15 @@ EOM # @sqsName # gen3_sqs_create_queue() { - local sqsName=$1 - if ! shift || [[ -z "$sqsName" ]]; then - gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'" + local serviceName=$1 + if ! 
shift || [[ -z "$serviceName" ]]; then + gen3_log_err "Must provide 'serviceName' to 'gen3_sqs_create_queue'" return 1 fi + local sqsName="$(gen3 api safe-name $serviceName)" gen3_log_info "Creating SQS '$sqsName'" - local prefix="$(gen3 api safe-name sqs-create)" ( # subshell - do not pollute parent environment - gen3 workon default ${prefix}__sqs 1>&2 + gen3 workon default ${sqsName}__sqs 1>&2 gen3 cd 1>&2 cat << EOF > config.tfvars sqs_name="$sqsName" @@ -76,7 +76,8 @@ EOF # @sqsName # gen3_sqs_create_queue_if_not_exist() { - local sqsName=$1 + local serviceName=$1 + local sqsName="$(gen3 api safe-name $serviceName)" if ! shift || [[ -z "$sqsName" ]]; then gen3_log_err "Must provide 'sqsName' to 'gen3_sqs_create_queue'" return 1 @@ -90,7 +91,7 @@ gen3_sqs_create_queue_if_not_exist() { gen3_log_info "The '$sqsName' SQS already exists" else # create the queue - sqsInfo="$(gen3_sqs_create_queue $sqsName)" || exit 1 + sqsInfo="$(gen3_sqs_create_queue $serviceName)" || exit 1 sqsUrl="$(jq -e -r '.["sqs-url"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-url' from output: $sqsInfo"; exit 1; } sqsArn="$(jq -e -r '.["sqs-arn"].value' <<< "$sqsInfo")" || { echo "Cannot get 'sqs-arn' from output: $sqsInfo"; exit 1; } fi From d55a3862609339149ac9373d708aac6546267618 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 15 Apr 2024 16:34:51 -0600 Subject: [PATCH 328/362] Update web_wildcard_whitelist (#2523) --- files/squid_whitelist/web_wildcard_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index b71ee76c2..1374c5d67 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -11,6 +11,7 @@ .bioconductor.org .bionimbus.org .bitbucket.org +.blob.core.windows.net .bloodpac.org .braincommons.org .bsc.es From 9056d2b7754aeaec60628e5238958ae309494771 Mon Sep 17 
00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 19 Apr 2024 10:56:16 -0600 Subject: [PATCH 329/362] adding a cron to check qaplanetv1 for the fenceshib service since it can break revproxy/automation (#2525) --- .../node-monitors/fenceshib-jenkins-test.yaml | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 kube/services/node-monitors/fenceshib-jenkins-test.yaml diff --git a/kube/services/node-monitors/fenceshib-jenkins-test.yaml b/kube/services/node-monitors/fenceshib-jenkins-test.yaml new file mode 100644 index 000000000..e9e27af98 --- /dev/null +++ b/kube/services/node-monitors/fenceshib-jenkins-test.yaml @@ -0,0 +1,40 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: fenceshib-service-check + namespace: default +spec: + schedule: "0 */4 * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: node-monitor + containers: + - name: kubectl + image: quay.io/cdis/awshelper + env: + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + + fenceshib=$(kubectl get services -A | grep "fenceshib-service" | awk '{print $2}') + + # Check if there are any fenceshib services + if [[ ! -z "$fenceshib" ]]; then + echo "Alert: Service fenceshib-service found with output: $fenceshib" + curl -X POST -H 'Content-type: application/json' --data "{\"text\": \"WARNING: Fenceshib service discovered in qaplanetv1 cluster. This could cause issues with future CI runs. Please delete this service if it is not needed. 
Run the following in qaplanetv1 to see which namespace it is in: \`kubectl get services -A | grep "fenceshib-service"\`\"}" $SLACK_WEBHOOK_URL + else + echo "Fenceshib Service Not Found" + fi + restartPolicy: OnFailure From 5dd762ee9389b3284872bf66ba46739ca6aaf1c7 Mon Sep 17 00:00:00 2001 From: Andrew Prokhorenkov Date: Fri, 19 Apr 2024 12:35:16 -0500 Subject: [PATCH 330/362] feat: update instance types with newer generation (#2527) --- .../argo-events/workflows/configmap.yaml | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index ae1c16653..8d9045714 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -27,22 +27,48 @@ data: - c6a.4xlarge - c6a.8xlarge - c6a.12xlarge + - c7a.large + - c7a.xlarge + - c7a.2xlarge + - c7a.4xlarge + - c7a.8xlarge + - c7a.12xlarge - c6i.large - c6i.xlarge - c6i.2xlarge - c6i.4xlarge - c6i.8xlarge - c6i.12xlarge + - c7i.large + - c7i.xlarge + - c7i.2xlarge + - c7i.4xlarge + - c7i.8xlarge + - c7i.12xlarge - m6a.2xlarge - m6a.4xlarge - m6a.8xlarge - m6a.12xlarge - m6a.16xlarge + - m6a.24xlarge + - m7a.2xlarge + - m7a.4xlarge + - m7a.8xlarge + - m7a.12xlarge + - m7a.16xlarge + - m7a.24xlarge - m6i.2xlarge - m6i.4xlarge - m6i.8xlarge - m6i.12xlarge - m6i.16xlarge + - m6i.24xlarge + - m7i.2xlarge + - m7i.4xlarge + - m7i.8xlarge + - m7i.12xlarge + - m7i.16xlarge + - m7i.24xlarge taints: - key: role value: WORKFLOW_NAME From b68908ffb6b81e2300843175b14d4efeb0fbc5a4 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Tue, 23 Apr 2024 10:07:47 -0600 Subject: [PATCH 331/362] fix(cronjob-apis): Updated decprecated cronjob apis (#2529) Co-authored-by: Edward Malinowski --- kube/services/jobs/arborist-rm-expired-access-cronjob.yaml | 2 +- kube/services/jobs/covid19-bayes-cronjob.yaml | 2 +- kube/services/jobs/etl-cronjob.yaml | 2 +- 
kube/services/jobs/fence-visa-update-cronjob.yaml | 2 +- kube/services/jobs/google-delete-expired-access-cronjob.yaml | 2 +- .../jobs/google-delete-expired-service-account-cronjob.yaml | 4 ++-- kube/services/jobs/google-init-proxy-groups-cronjob.yaml | 4 ++-- kube/services/jobs/google-manage-account-access-cronjob.yaml | 4 ++-- kube/services/jobs/google-manage-keys-cronjob.yaml | 4 ++-- .../jobs/google-verify-bucket-access-group-cronjob.yaml | 4 ++-- kube/services/jobs/healthcheck-cronjob.yaml | 2 +- kube/services/jobs/s3sync-cronjob.yaml | 2 +- 12 files changed, 17 insertions(+), 17 deletions(-) diff --git a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml index 29603d27f..a72623736 100644 --- a/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml +++ b/kube/services/jobs/arborist-rm-expired-access-cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: arborist-rm-expired-access diff --git a/kube/services/jobs/covid19-bayes-cronjob.yaml b/kube/services/jobs/covid19-bayes-cronjob.yaml index 733c17cf7..01e71bade 100644 --- a/kube/services/jobs/covid19-bayes-cronjob.yaml +++ b/kube/services/jobs/covid19-bayes-cronjob.yaml @@ -1,5 +1,5 @@ # gen3 job run covid19-bayes-cronjob S3_BUCKET -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: covid19-bayes diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml index 463fbfb2e..95b423deb 100644 --- a/kube/services/jobs/etl-cronjob.yaml +++ b/kube/services/jobs/etl-cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: etl diff --git a/kube/services/jobs/fence-visa-update-cronjob.yaml b/kube/services/jobs/fence-visa-update-cronjob.yaml index 6c58ef291..eba842ddf 100644 --- a/kube/services/jobs/fence-visa-update-cronjob.yaml +++ b/kube/services/jobs/fence-visa-update-cronjob.yaml @@ -1,4 
+1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: fence-visa-update diff --git a/kube/services/jobs/google-delete-expired-access-cronjob.yaml b/kube/services/jobs/google-delete-expired-access-cronjob.yaml index ce485cce3..2b9e4e49a 100644 --- a/kube/services/jobs/google-delete-expired-access-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-access-cronjob.yaml @@ -1,5 +1,5 @@ --- -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: google-delete-expired-access diff --git a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml index eb102f5bf..b40e22624 100644 --- a/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml +++ b/kube/services/jobs/google-delete-expired-service-account-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-delete-expired-service-account diff --git a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml index 499d6cabd..6b4fc10aa 100644 --- a/kube/services/jobs/google-init-proxy-groups-cronjob.yaml +++ b/kube/services/jobs/google-init-proxy-groups-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-init-proxy-groups diff --git a/kube/services/jobs/google-manage-account-access-cronjob.yaml b/kube/services/jobs/google-manage-account-access-cronjob.yaml index 4e796cea0..fd8bba606 100644 --- a/kube/services/jobs/google-manage-account-access-cronjob.yaml +++ b/kube/services/jobs/google-manage-account-access-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to 
batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-manage-account-access diff --git a/kube/services/jobs/google-manage-keys-cronjob.yaml b/kube/services/jobs/google-manage-keys-cronjob.yaml index ea0bcc45f..eff76d30a 100644 --- a/kube/services/jobs/google-manage-keys-cronjob.yaml +++ b/kube/services/jobs/google-manage-keys-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-manage-keys diff --git a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml index 57981d813..49e83374f 100644 --- a/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml +++ b/kube/services/jobs/google-verify-bucket-access-group-cronjob.yaml @@ -1,6 +1,6 @@ --- -# Note: change to batch/v1beta1 once we bump to k8s 1.8 -apiVersion: batch/v1beta1 +# Note: change to batch/v1 once we bump to k8s 1.8 +apiVersion: batch/v1 kind: CronJob metadata: name: google-verify-bucket-access-group diff --git a/kube/services/jobs/healthcheck-cronjob.yaml b/kube/services/jobs/healthcheck-cronjob.yaml index d79274bb7..1ca71fc8d 100644 --- a/kube/services/jobs/healthcheck-cronjob.yaml +++ b/kube/services/jobs/healthcheck-cronjob.yaml @@ -1,4 +1,4 @@ -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: healthcheck diff --git a/kube/services/jobs/s3sync-cronjob.yaml b/kube/services/jobs/s3sync-cronjob.yaml index f05ab518a..69d66ec3f 100644 --- a/kube/services/jobs/s3sync-cronjob.yaml +++ b/kube/services/jobs/s3sync-cronjob.yaml @@ -5,7 +5,7 @@ #####REQUIRED VARIABLE######## #SOURCE_BUCKET #TARGET_BUCKET -apiVersion: batch/v1beta1 +apiVersion: batch/v1 kind: CronJob metadata: name: s3sync From 
40be00d3460af4c133ea0c4bcb675620be56dfce Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Tue, 23 Apr 2024 14:17:55 -0500 Subject: [PATCH 332/362] fix: skip setting up cedar-ingest client cred in ci (#2526) * fix: skip setting up cedar-ingest client cred in ci * fix cleanup --------- Co-authored-by: Hara Prasad --- gen3/bin/kube-setup-cedar-wrapper.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gen3/bin/kube-setup-cedar-wrapper.sh b/gen3/bin/kube-setup-cedar-wrapper.sh index c8f0d03c6..a56bebc40 100644 --- a/gen3/bin/kube-setup-cedar-wrapper.sh +++ b/gen3/bin/kube-setup-cedar-wrapper.sh @@ -60,8 +60,12 @@ if ! g3kubectl get secrets/cedar-g3auto > /dev/null 2>&1; then return 1 fi -gen3_log_info "Checking cedar-client creds" -setup_creds +if [[ -n "$JENKINS_HOME" ]]; then + gen3_log_info "Skipping cedar-client creds setup in non-adminvm environment" +else + gen3_log_info "Checking cedar-client creds" + setup_creds +fi if ! gen3 secrets decode cedar-g3auto cedar_api_key.txt > /dev/null 2>&1; then gen3_log_err "No CEDAR api key present in cedar-g3auto secret, not rolling CEDAR wrapper" From ec4053eb7eb41a12b0aa65aab96343ae03d8fff3 Mon Sep 17 00:00:00 2001 From: Luca Graglia Date: Tue, 30 Apr 2024 14:13:48 -0500 Subject: [PATCH 333/362] Update workon.sh (#2039) Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- gen3/bin/workon.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gen3/bin/workon.sh b/gen3/bin/workon.sh index e7b951d1c..f614cf662 100644 --- a/gen3/bin/workon.sh +++ b/gen3/bin/workon.sh @@ -113,7 +113,7 @@ if [[ ! 
-f "$bucketCheckFlag" && "$GEN3_FLAVOR" == "AWS" ]]; then } EOM ) - gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" --create-bucket-configuration ‘{“LocationConstraint”:“‘$(aws configure get $GEN3_PROFILE.region)‘“}’ + gen3_aws_run aws s3api create-bucket --acl private --bucket "$GEN3_S3_BUCKET" $([[ $(aws configure get $GEN3_PROFILE.region) = "us-east-1" ]] && echo "" || echo --create-bucket-configuration LocationConstraint="$(aws configure get $GEN3_PROFILE.region)") sleep 5 # Avoid race conditions if gen3_aws_run aws s3api put-bucket-encryption --bucket "$GEN3_S3_BUCKET" --server-side-encryption-configuration "$S3_POLICY"; then touch "$bucketCheckFlag" From 7f56512a802a7ee6579359fe9cbc7f033300376a Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 1 May 2024 14:54:44 -0400 Subject: [PATCH 334/362] Fix/karpenter setup with sed (#2536) * Fixing the sed command in the create karpenter resources job * Fixing some silliness * Please let me blame that on Friday brain * Let's do it * What was I doing before? * Using a more robust method for grabbing workflow and usernames --- .../argo-events/workflows/sensor-created.yaml | 20 ++++++++++++++--- .../karpenter-reconciler-cronjob.yaml | 22 ++++++++++--------- 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/kube/services/argo-events/workflows/sensor-created.yaml b/kube/services/argo-events/workflows/sensor-created.yaml index 4221f5742..9f6de2c83 100644 --- a/kube/services/argo-events/workflows/sensor-created.yaml +++ b/kube/services/argo-events/workflows/sensor-created.yaml @@ -59,12 +59,22 @@ spec: args: - "-c" - | + #!/bin/bash + if [ -z "$PROVISIONER_TEMPLATE" ]; then + PROVISIONER_TEMPLATE="provisioner.yaml" + fi + + if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then + AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" + fi + + if ! 
kubectl get awsnodetemplate workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - fi if ! kubectl get provisioner workflow-$WORKFLOW_NAME >/dev/null 2>&1; then - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" | kubectl apply -f - + sed -e "s/WORKFLOW_NAME/$WORKFLOW_NAME/" -e "s/GEN3_USERNAME/$GEN3_USERNAME/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - fi env: - name: WORKFLOW_NAME @@ -76,9 +86,13 @@ spec: configMapKeyRef: name: environment key: environment + - name: PROVISIONER_TEMPLATE + value: /manifests/provisioner.yaml + - name: AWSNODETEMPLATE_TEMPLATE + value: /manifests/nodetemplate.yaml volumeMounts: - name: karpenter-templates-volume - mountPath: /home/manifests + mountPath: /manifests volumes: - name: karpenter-templates-volume configMap: diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml index 4f82e9d43..aef5d6c49 100644 --- a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml +++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob.yaml @@ -43,9 +43,7 @@ spec: ENVIRONMENT=$(kubectl -n default get configmap global -o jsonpath="{.data.environment}") - RAW_WORKFLOWS=$(kubectl get workflows -n argo -o yaml) - - WORKFLOWS=$(echo "${RAW_WORKFLOWS}" | yq -r '.items[] | [.metadata.name, .metadata.labels.gen3username] | join(" ")') + WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}') WORKFLOW_ARRAY=() @@ -53,20 +51,24 @@ spec: WORKFLOW_ARRAY+=("$line") done <<< 
"$WORKFLOWS" + echo $WORKFLOWS + for workflow in "${WORKFLOW_ARRAY[@]}" do workflow_name=$(echo "$workflow" | awk '{print $1}') workflow_user=$(echo "$workflow" | awk '{print $2}') - if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then - echo "No awsnodetemplate found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - - fi + if [ ! -z "$workflow_name" ]; then + if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then + echo "No awsnodetemplate found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - + fi - if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then - echo "No provisioner found for ${workflow_name}, creating one" - sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + if ! 
kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then + echo "No provisioner found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + fi fi done restartPolicy: OnFailure From 4d346fffe9631ebe31d0297d17a885cc7f598867 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Wed, 1 May 2024 16:15:07 -0400 Subject: [PATCH 335/362] Let's get the va-testing reconciler onto master (#2537) --- ...rpenter-reconciler-cronjob-va-testing.yaml | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml diff --git a/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml new file mode 100644 index 000000000..aaba57b07 --- /dev/null +++ b/kube/services/karpenter-reconciler/karpenter-reconciler-cronjob-va-testing.yaml @@ -0,0 +1,71 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: karpenter-reconciler-cronjob-va-testing + namespace: argo-events +spec: + schedule: "*/5 * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccount: karpenter-reconciler + volumes: + - name: karpenter-templates-volume + configMap: + name: karpenter-templates + containers: + - name: karpenter-reconciler + image: quay.io/cdis/awshelper + volumeMounts: + - name: karpenter-templates-volume + mountPath: /manifests + env: + - name: PROVISIONER_TEMPLATE + value: /manifests/provisioner.yaml + - name: AWSNODETEMPLATE_TEMPLATE + value: /manifests/nodetemplate.yaml + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + if [ -z "$PROVISIONER_TEMPLATE" ]; then + PROVISIONER_TEMPLATE="provisioner.yaml" + fi + + if [ -z "$AWSNODETEMPLATE_TEMPLATE" ]; then + 
AWSNODETEMPLATE_TEMPLATE="nodetemplate.yaml" + fi + + ENVIRONMENT=$(kubectl -n va-testing get configmap global -o jsonpath="{.data.environment}") + + WORKFLOWS=$(kubectl get workflows -n argo -o=jsonpath='{range .items[*]}{.metadata.name}{" "}{.metadata.labels.gen3username}{"\n"}') + + WORKFLOW_ARRAY=() + + while IFS= read -r line; do + WORKFLOW_ARRAY+=("$line") + done <<< "$WORKFLOWS" + + for workflow in "${WORKFLOW_ARRAY[@]}" + do + echo "Running loop for workflow: $workflow" + workflow_name=$(echo "$workflow" | awk '{print $1}') + workflow_user=$(echo "$workflow" | awk '{print $2}') + + if ! kubectl get awsnodetemplate workflow-$workflow_name >/dev/null 2>&1; then + echo "No awsnodetemplate found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$AWSNODETEMPLATE_TEMPLATE" | kubectl apply -f - + fi + + if ! kubectl get provisioner workflow-$workflow_name >/dev/null 2>&1; then + echo "No provisioner found for ${workflow_name}, creating one" + sed -e "s/WORKFLOW_NAME/$workflow_name/" -e "s/GEN3_USERNAME/$workflow_user/" -e "s/ENVIRONMENT/$ENVIRONMENT/" "$PROVISIONER_TEMPLATE" | kubectl apply -f - + + fi + done + restartPolicy: OnFailure From 76831781e04480dc78279b56bfd077980e7728cf Mon Sep 17 00:00:00 2001 From: Hara Prasad Date: Wed, 1 May 2024 14:13:08 -0700 Subject: [PATCH 336/362] Roll arborist before indexd (#2535) --- gen3/bin/kube-roll-all.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/gen3/bin/kube-roll-all.sh b/gen3/bin/kube-roll-all.sh index 1dca87c68..744e8e288 100644 --- a/gen3/bin/kube-roll-all.sh +++ b/gen3/bin/kube-roll-all.sh @@ -51,20 +51,20 @@ fi gen3 kube-setup-networkpolicy disable # -# Hopefull core secrets/config in place - start bringing up services +# Hopefully core secrets/config in place - start bringing up services # -if g3k_manifest_lookup .versions.indexd 2> /dev/null; then - gen3 kube-setup-indexd & 
-else - gen3_log_info "no manifest entry for indexd" -fi - if g3k_manifest_lookup .versions.arborist 2> /dev/null; then gen3 kube-setup-arborist || gen3_log_err "arborist setup failed?" else gen3_log_info "no manifest entry for arborist" fi +if g3k_manifest_lookup .versions.indexd 2> /dev/null; then + gen3 kube-setup-indexd & +else + gen3_log_info "no manifest entry for indexd" +fi + if g3k_manifest_lookup '.versions["audit-service"]' 2> /dev/null; then gen3 kube-setup-audit-service else From 229f9a5281819db415d9929c2c2220ec67f3f700 Mon Sep 17 00:00:00 2001 From: George Thomas <98996322+george42-ctds@users.noreply.github.com> Date: Thu, 2 May 2024 09:40:36 -0700 Subject: [PATCH 337/362] HP-1470 Use cedar instance id for mds (#2532) * (HP-1470): use CEDAR instance id for mds queries * (HP-1470): handle case of negative limit from small total --- .../healdata/heal-cedar-data-ingest.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index e0c4b3c46..7b4c638ab 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -227,24 +227,24 @@ def get_related_studies(serial_num, guid, hostname): returned_records = len(metadata_return["metadata"]["records"]) print(f"Successfully got {returned_records} record(s) from CEDAR directory") for cedar_record in metadata_return["metadata"]["records"]: - # get the appl id from cedar for querying in our MDS - cedar_appl_id = pydash.get( - cedar_record, "metadata_location.nih_application_id" + # get the CEDAR instance id from cedar for querying in our MDS + cedar_instance_id = pydash.get( + cedar_record, "metadata_location.cedar_study_level_metadata_template_instance_ID" ) - if cedar_appl_id is None: - print("This record doesn't have appl_id, skipping...") + if cedar_instance_id is None: + print("This record doesn't have CEDAR instance id, 
skipping...") continue - # Get the metadata record for the nih_application_id + # Get the metadata record for the CEDAR instance id mds = requests.get( - f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.nih_application_id={cedar_appl_id}&data=true" + f"http://revproxy-service/mds/metadata?gen3_discovery.study_metadata.metadata_location.cedar_study_level_metadata_template_instance_ID={cedar_instance_id}&data=true" ) if mds.status_code == 200: mds_res = mds.json() # the query result key is the record of the metadata. If it doesn't return anything then our query failed. if len(list(mds_res.keys())) == 0 or len(list(mds_res.keys())) > 1: - print("Query returned nothing for", cedar_appl_id, "appl id") + print(f"Query returned nothing for template_instance_ID={cedar_instance_id}&data=true") continue # get the key for our mds record @@ -394,3 +394,6 @@ def get_related_studies(serial_num, guid, hostname): offset = offset + limit if (offset + limit) > total: limit = total - offset + + if limit < 0: + break From 4cb747f3fd7dfc4c17b593dd42f5ffdb7614d035 Mon Sep 17 00:00:00 2001 From: Pauline Ribeyre <4224001+paulineribeyre@users.noreply.github.com> Date: Thu, 2 May 2024 14:31:55 -0500 Subject: [PATCH 338/362] MIDRC-639 DICOM viewer v3: fix prefix in config and port (#2533) --- gen3/bin/kube-setup-dicom.sh | 4 ++-- kube/services/dicom-viewer/dicom-viewer-service.yaml | 2 +- kube/services/ohif-viewer/ohif-viewer-deploy.yaml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/gen3/bin/kube-setup-dicom.sh b/gen3/bin/kube-setup-dicom.sh index 42110eea2..e49060ecb 100644 --- a/gen3/bin/kube-setup-dicom.sh +++ b/gen3/bin/kube-setup-dicom.sh @@ -83,12 +83,12 @@ EOM EOM fi - if g3k_manifest_lookup .versions["dicom-server"] > /dev/null 2>&1; then + if g3k_manifest_lookup '.versions["dicom-server"]' > /dev/null 2>&1; then export DICOM_SERVER_URL="/dicom-server" gen3_log_info "attaching ohif viewer to old dicom-server (orthanc w/ 
aurora)" fi - if g3k_manifest_lookup .versions["orthanc"] > /dev/null 2>&1; then + if g3k_manifest_lookup .versions.orthanc > /dev/null 2>&1; then export DICOM_SERVER_URL="/orthanc" gen3_log_info "attaching ohif viewer to new dicom-server (orthanc w/ s3)" fi diff --git a/kube/services/dicom-viewer/dicom-viewer-service.yaml b/kube/services/dicom-viewer/dicom-viewer-service.yaml index ea2576584..26f3a21b0 100644 --- a/kube/services/dicom-viewer/dicom-viewer-service.yaml +++ b/kube/services/dicom-viewer/dicom-viewer-service.yaml @@ -12,4 +12,4 @@ spec: nodePort: null name: http type: ClusterIP - \ No newline at end of file + diff --git a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml index fc45434ca..e2df93cd0 100644 --- a/kube/services/ohif-viewer/ohif-viewer-deploy.yaml +++ b/kube/services/ohif-viewer/ohif-viewer-deploy.yaml @@ -86,7 +86,7 @@ spec: periodSeconds: 60 timeoutSeconds: 30 ports: - - containerPort: 80 + - containerPort: 8080 volumeMounts: - name: config-volume-g3auto readOnly: true From 2d3860cfb01d9547c0774a873f38869564a43d1d Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Tue, 7 May 2024 11:04:19 -0400 Subject: [PATCH 339/362] Feat/long running workflow alert (#2538) * Let's see if this works * Fixing typos * Fixing some silliness * Finalizing our workflow monitor * Fixing the branch we point to --- .../workflow-age-monitor/application.yaml | 22 ++++++++ .../argo-workflow-age.yaml | 55 +++++++++++++++++++ kube/services/workflow-age-monitor/auth.yaml | 18 ++++++ 3 files changed, 95 insertions(+) create mode 100644 kube/services/workflow-age-monitor/application.yaml create mode 100644 kube/services/workflow-age-monitor/argo-workflow-age.yaml create mode 100644 kube/services/workflow-age-monitor/auth.yaml diff --git a/kube/services/workflow-age-monitor/application.yaml b/kube/services/workflow-age-monitor/application.yaml new file mode 100644 index 
000000000..99798bb2b --- /dev/null +++ b/kube/services/workflow-age-monitor/application.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Application +metadata: + name: argo-workflow-age-monitor-application + namespace: argocd +spec: + destination: + namespace: default + server: https://kubernetes.default.svc + project: default + source: + repoURL: https://github.com/uc-cdis/cloud-automation.git + targetRevision: master + path: kube/services/workflow-age-monitor/ + directory: + exclude: "application.yaml" + syncPolicy: + automated: + prune: true + selfHeal: true + syncOptions: + - CreateNamespace=true diff --git a/kube/services/workflow-age-monitor/argo-workflow-age.yaml b/kube/services/workflow-age-monitor/argo-workflow-age.yaml new file mode 100644 index 000000000..0d0c29115 --- /dev/null +++ b/kube/services/workflow-age-monitor/argo-workflow-age.yaml @@ -0,0 +1,55 @@ +apiVersion: batch/v1 +kind: CronJob +metadata: + name: argo-workflow-age + namespace: default +spec: + schedule: "*/5 * * * *" + jobTemplate: + spec: + template: + metadata: + labels: + app: gen3job + spec: + serviceAccountName: argo-workflow-monitor + containers: + - name: kubectl + image: quay.io/cdis/awshelper + env: + # This is 3 * 3600, or 3 hours + - name: THRESHOLD_TIME + value: "10800" + - name: SLACK_WEBHOOK_URL + valueFrom: + configMapKeyRef: + name: global + key: slack_webhook + + command: ["/bin/bash"] + args: + - "-c" + - | + #!/bin/bash + # Get all workflows with specific label and check their age + kubectl get workflows --all-namespaces -o json | jq -c '.items[] | {name: .metadata.name, creationTimestamp: .metadata.creationTimestamp}' | while read workflow_info; do + WORKFLOW_NAME=$(echo $workflow_info | jq -r '.name') + CREATION_TIMESTAMP=$(echo $workflow_info | jq -r '.creationTimestamp') + + # Convert creation timestamp to Unix Epoch time + CREATION_EPOCH=$(date -d "$CREATION_TIMESTAMP" +%s) + + # Get current Unix Epoch time + CURRENT_EPOCH=$(date +%s) + + # Calculate 
workflow age in seconds + WORKFLOW_AGE=$(($CURRENT_EPOCH - $CREATION_EPOCH)) + + # Check if workflow age is greater than threshold + if [ "$WORKFLOW_AGE" -gt "$THRESHOLD_TIME" ]; then + echo "Workflow $WORKFLOW_NAME has been running for over $THRESHOLD_TIME seconds, sending an alert" + # Send alert to Slack + curl -X POST -H 'Content-type: application/json' --data "{\"text\":\"WARNING: Workflow \`${WORKFLOW_NAME}\` has been running longer than $THRESHOLD_TIME seconds\"}" $SLACK_WEBHOOK_URL + fi + done + restartPolicy: OnFailure diff --git a/kube/services/workflow-age-monitor/auth.yaml b/kube/services/workflow-age-monitor/auth.yaml new file mode 100644 index 000000000..fb7970a3e --- /dev/null +++ b/kube/services/workflow-age-monitor/auth.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-workflow-monitor + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-workflow-monitor-binding +subjects: + - kind: ServiceAccount + name: argo-workflow-monitor + namespace: default +roleRef: + kind: ClusterRole + name: argo-argo-workflows-view + apiGroup: rbac.authorization.k8s.io From 4c467f571a57d43f82895d71cbdec0f5ba61545e Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Thu, 9 May 2024 08:35:53 -0600 Subject: [PATCH 340/362] Update Jenkinsfile to point to GPE-1309 jenkins-lib (#2542) --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 908c2d01a..0d1f9f34b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,7 +1,7 @@ #!groovy // See 'Loading libraries dynamically' here: https://jenkins.io/doc/book/pipeline/shared-libraries/ -library 'cdis-jenkins-lib@master' +library 'cdis-jenkins-lib@feat/GPE-1309' import org.jenkinsci.plugins.pipeline.modeldefinition.Utils From 01a2b2fe7e0dcdde50bf1a01e1d98594ad46501c Mon Sep 17 00:00:00 2001 From: EliseCastle23 
<109446148+EliseCastle23@users.noreply.github.com> Date: Fri, 10 May 2024 08:35:01 -0600 Subject: [PATCH 341/362] Update Jenkinsfile (#2545) --- Jenkinsfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 0d1f9f34b..eaf4dd9c0 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,7 +1,7 @@ #!groovy // See 'Loading libraries dynamically' here: https://jenkins.io/doc/book/pipeline/shared-libraries/ -library 'cdis-jenkins-lib@feat/GPE-1309' +library 'cdis-jenkins-lib@feat/master' import org.jenkinsci.plugins.pipeline.modeldefinition.Utils From 7ea1380b5a8dab63c939c39cb8005d9800d41a5b Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 13 May 2024 11:05:41 -0400 Subject: [PATCH 342/362] Adding a purpose label to workflow nodes, so our old node monitoring can catch them (#2546) --- kube/services/argo-events/workflows/configmap.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/kube/services/argo-events/workflows/configmap.yaml b/kube/services/argo-events/workflows/configmap.yaml index 8d9045714..cd82478c2 100644 --- a/kube/services/argo-events/workflows/configmap.yaml +++ b/kube/services/argo-events/workflows/configmap.yaml @@ -75,6 +75,7 @@ data: effect: NoSchedule labels: role: WORKFLOW_NAME + purpose: workflow limits: resources: cpu: 2000 From 6dca75f1dd30a7aca4c2308b45f8cb0829ac473b Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Tue, 14 May 2024 09:28:30 -0500 Subject: [PATCH 343/362] Update web_wildcard_whitelist (#2547) * Update web_wildcard_whitelist * Update Jenkinsfile --- Jenkinsfile | 2 +- files/squid_whitelist/web_wildcard_whitelist | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index eaf4dd9c0..908c2d01a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,7 +1,7 @@ #!groovy // See 'Loading libraries dynamically' here: https://jenkins.io/doc/book/pipeline/shared-libraries/ -library 'cdis-jenkins-lib@feat/master' +library 'cdis-jenkins-lib@master' import org.jenkinsci.plugins.pipeline.modeldefinition.Utils diff --git a/files/squid_whitelist/web_wildcard_whitelist b/files/squid_whitelist/web_wildcard_whitelist index 1374c5d67..1717b4443 100644 --- a/files/squid_whitelist/web_wildcard_whitelist +++ b/files/squid_whitelist/web_wildcard_whitelist @@ -40,6 +40,7 @@ .dockerproject.org .dph.illinois.gov .elasticsearch.org +.eramba.org .erlang-solutions.com .external-secrets.io .extjs.com From ad7d2580f9174e7be8502190d04ccc578a199c24 Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Tue, 14 May 2024 10:52:47 -0600 Subject: [PATCH 344/362] correcting networkpolicy format for ssjdispatcher and sower (#2539) --- kube/services/netpolicy/gen3/services/sower_netpolicy.yaml | 1 - .../netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml index 7ad51caca..93c2de3c3 100644 --- a/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/sower_netpolicy.yaml @@ -3,7 +3,6 @@ kind: NetworkPolicy metadata: name: netpolicy-sowerjob spec: - spec: podSelector: matchLabels: app: sowerjob diff --git 
a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml index 7b1f85c29..bd6e03f05 100644 --- a/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml +++ b/kube/services/netpolicy/gen3/services/ssjdispatcherjob_netpolicy.yaml @@ -3,7 +3,6 @@ kind: NetworkPolicy metadata: name: netpolicy-ssjdispatcherjob spec: - spec: podSelector: matchLabels: app: ssjdispatcherjob From 132bdd1112d8e948ecd5b5589b9b2803b3c152cc Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Wed, 15 May 2024 18:14:25 -0500 Subject: [PATCH 345/362] pass AGG_MDS_DEFAULT_DATA_DICT_FIELD to mds (#2550) * pass AGG_MDS_DEFAULT_DATA_DICT_FIELD to mds * fix indent --- kube/services/jobs/metadata-aggregate-sync-job.yaml | 6 ++++++ kube/services/metadata/metadata-deploy.yaml | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/kube/services/jobs/metadata-aggregate-sync-job.yaml b/kube/services/jobs/metadata-aggregate-sync-job.yaml index 8ef33532f..7f4043753 100644 --- a/kube/services/jobs/metadata-aggregate-sync-job.yaml +++ b/kube/services/jobs/metadata-aggregate-sync-job.yaml @@ -74,6 +74,12 @@ spec: name: manifest-metadata key: AGG_MDS_NAMESPACE optional: true + - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD + valueFrom: + configMapKeyRef: + name: manifest-metadata + key: AGG_MDS_DEFAULT_DATA_DICT_FIELD + optional: true imagePullPolicy: Always command: ["/bin/sh"] args: diff --git a/kube/services/metadata/metadata-deploy.yaml b/kube/services/metadata/metadata-deploy.yaml index 9bb6ac9c5..72986e795 100644 --- a/kube/services/metadata/metadata-deploy.yaml +++ b/kube/services/metadata/metadata-deploy.yaml @@ -91,6 +91,12 @@ spec: name: manifest-metadata key: AGG_MDS_NAMESPACE optional: true + - name: AGG_MDS_DEFAULT_DATA_DICT_FIELD + valueFrom: + configMapKeyRef: + name: manifest-metadata + key: AGG_MDS_DEFAULT_DATA_DICT_FIELD + optional: true imagePullPolicy: Always 
livenessProbe: httpGet: From 59bf8609d56713483b60e0e3524ec613c7f935d1 Mon Sep 17 00:00:00 2001 From: Mingfei Shao <2475897+mfshao@users.noreply.github.com> Date: Mon, 20 May 2024 12:20:22 -0500 Subject: [PATCH 346/362] Update heal-cedar-data-ingest.py (#2551) --- files/scripts/healdata/heal-cedar-data-ingest.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/files/scripts/healdata/heal-cedar-data-ingest.py b/files/scripts/healdata/heal-cedar-data-ingest.py index 7b4c638ab..45098400f 100644 --- a/files/scripts/healdata/heal-cedar-data-ingest.py +++ b/files/scripts/healdata/heal-cedar-data-ingest.py @@ -74,7 +74,9 @@ def is_valid_uuid(uuid_to_test, version=4): def update_filter_metadata(metadata_to_update): - filter_metadata = [] + # Retain these from existing filters + save_filters = ["Common Data Elements"] + filter_metadata = [filter for filter in metadata_to_update["advSearchFilters"] if filter["key"] in save_filters] for metadata_field_key, filter_field_key in FILTER_FIELD_MAPPINGS.items(): filter_field_values = pydash.get(metadata_to_update, metadata_field_key) if filter_field_values: @@ -97,7 +99,7 @@ def update_filter_metadata(metadata_to_update): filter_metadata = pydash.uniq(filter_metadata) metadata_to_update["advSearchFilters"] = filter_metadata # Retain these from existing tags - save_tags = ["Data Repository"] + save_tags = ["Data Repository", "Common Data Elements"] tags = [tag for tag in metadata_to_update["tags"] if tag["category"] in save_tags] # Add any new tags from advSearchFilters for f in metadata_to_update["advSearchFilters"]: From 898a023ba9b1c24c7e908f9c3b4d3cc34ecab41a Mon Sep 17 00:00:00 2001 From: EliseCastle23 <109446148+EliseCastle23@users.noreply.github.com> Date: Mon, 20 May 2024 12:07:09 -0600 Subject: [PATCH 347/362] Priority Class and More Resource Requests (#2541) * adding a priority class for aws-es-proxy and adding larger requests for etl-job to test in CI environment * increasing requests and resources 
for cronjob too * adding quotes * missed that the jobs already had a limit. Fixed the syntax error. * reducing memory --- gen3/bin/kube-setup-aws-es-proxy.sh | 1 + kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml | 1 + .../services/aws-es-proxy/aws-es-proxy-priority-class.yaml | 7 +++++++ kube/services/jobs/etl-cronjob.yaml | 4 +++- kube/services/jobs/etl-job.yaml | 4 +++- 5 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml diff --git a/gen3/bin/kube-setup-aws-es-proxy.sh b/gen3/bin/kube-setup-aws-es-proxy.sh index f13a4d411..986c5bf05 100644 --- a/gen3/bin/kube-setup-aws-es-proxy.sh +++ b/gen3/bin/kube-setup-aws-es-proxy.sh @@ -21,6 +21,7 @@ if g3kubectl get secrets/aws-es-proxy > /dev/null 2>&1; then if ES_ENDPOINT="$(aws es describe-elasticsearch-domains --domain-names ${envname}-gen3-metadata-2 --query "DomainStatusList[*].Endpoints" --output text)" \ && [[ -n "${ES_ENDPOINT}" && -n "${envname}" ]]; then gen3 roll aws-es-proxy GEN3_ES_ENDPOINT "${ES_ENDPOINT}" + g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml" g3kubectl apply -f "${GEN3_HOME}/kube/services/aws-es-proxy/aws-es-proxy-service.yaml" gen3_log_info "kube-setup-aws-es-proxy" "The aws-es-proxy service has been deployed onto the k8s cluster." 
else diff --git a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml index ad74fc25b..34f18d973 100644 --- a/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml +++ b/kube/services/aws-es-proxy/aws-es-proxy-deploy.yaml @@ -44,6 +44,7 @@ spec: - name: credentials secret: secretName: "aws-es-proxy" + priorityClassName: aws-es-proxy-high-priority containers: - name: esproxy GEN3_AWS-ES-PROXY_IMAGE|-image: quay.io/cdis/aws-es-proxy:0.8-| diff --git a/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml new file mode 100644 index 000000000..6bd619a22 --- /dev/null +++ b/kube/services/aws-es-proxy/aws-es-proxy-priority-class.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: aws-es-proxy-high-priority +value: 1000000 +globalDefault: false +description: "Priority class for aws-es-proxy service" diff --git a/kube/services/jobs/etl-cronjob.yaml b/kube/services/jobs/etl-cronjob.yaml index 95b423deb..3c3828dac 100644 --- a/kube/services/jobs/etl-cronjob.yaml +++ b/kube/services/jobs/etl-cronjob.yaml @@ -95,8 +95,10 @@ spec: subPath: user.yaml resources: limits: - cpu: 1 + cpu: 2 memory: 10Gi + requests: + cpu: 2 command: ["/bin/bash"] args: - "-c" diff --git a/kube/services/jobs/etl-job.yaml b/kube/services/jobs/etl-job.yaml index 6b9b887ec..266b0410c 100644 --- a/kube/services/jobs/etl-job.yaml +++ b/kube/services/jobs/etl-job.yaml @@ -91,8 +91,10 @@ spec: subPath: user.yaml resources: limits: - cpu: 1 + cpu: 2 memory: 10Gi + requests: + cpu: 2 command: ["/bin/bash" ] args: - "-c" From fb88985f6b50226a2c932167c75f6a4798cb9400 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 20 May 2024 12:09:32 -0600 Subject: [PATCH 348/362] feat(karpenter-upgrade): Upgraded karpenter version support in kube-setup-karpenter (#2530) * feat(karpenter-upgrade): Upgraded karpenter version support in kube-setup-karpenter * 
Update kube-setup-karpenter.sh * Update kube-setup-karpenter.sh --------- Co-authored-by: Edward Malinowski --- gen3/bin/kube-setup-karpenter.sh | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/gen3/bin/kube-setup-karpenter.sh b/gen3/bin/kube-setup-karpenter.sh index 949c1ccd1..0a743f7ed 100644 --- a/gen3/bin/kube-setup-karpenter.sh +++ b/gen3/bin/kube-setup-karpenter.sh @@ -24,13 +24,15 @@ gen3_deploy_karpenter() { karpenter=$(g3k_config_lookup .global.karpenter_version) fi export clusterversion=`kubectl version -o json | jq -r .serverVersion.minor` - if [ "${clusterversion}" = "25+" ]; then + if [ "${clusterversion}" = "28+" ]; then + karpenter=${karpenter:-v0.32.9} + elif [ "${clusterversion}" = "25+" ]; then karpenter=${karpenter:-v0.27.0} elif [ "${clusterversion}" = "24+" ]; then karpenter=${karpenter:-v0.24.0} else - karpenter=${karpenter:-v0.22.0} - fi + karpenter=${karpenter:-v0.32.9} + fi local queue_name="$(gen3 api safe-name karpenter-sqs)" echo '{ "Statement": [ @@ -38,6 +40,7 @@ gen3_deploy_karpenter() { "Action": [ "ssm:GetParameter", "iam:PassRole", + "iam:*InstanceProfile", "ec2:DescribeImages", "ec2:RunInstances", "ec2:DescribeSubnets", @@ -142,6 +145,7 @@ gen3_deploy_karpenter() { sleep 15 aws eks create-fargate-profile --fargate-profile-name karpenter-profile --cluster-name $vpc_name --pod-execution-role-arn arn:aws:iam::$(aws sts get-caller-identity --output text --query "Account"):role/AmazonEKSFargatePodExecutionRole-${vpc_name} --subnets $subnets --selectors '{"namespace": "karpenter"}' || true gen3_log_info "Installing karpenter using helm" + helm template karpenter-crd oci://public.ecr.aws/karpenter/karpenter-crd --version ${karpenter} --namespace "karpenter" | g3kubectl apply -f - helm upgrade --install karpenter oci://public.ecr.aws/karpenter/karpenter --version ${karpenter} --namespace karpenter --wait \ --set settings.aws.defaultInstanceProfile=${vpc_name}_EKS_workers \ --set 
settings.aws.clusterEndpoint="${cluster_endpoint}" \ From 9b4e5c889867923355d874dd1ea763a56d12f0dc Mon Sep 17 00:00:00 2001 From: Binam Bajracharya <44302895+BinamB@users.noreply.github.com> Date: Mon, 20 May 2024 13:11:17 -0500 Subject: [PATCH 349/362] fix: Docker/sidecar/Dockerfile to reduce vulnerabilities (#2524) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-ALPINE38-BZIP2-452633 - https://snyk.io/vuln/SNYK-ALPINE38-GD-344573 - https://snyk.io/vuln/SNYK-ALPINE38-GD-344662 - https://snyk.io/vuln/SNYK-ALPINE38-LIBXSLT-344661 - https://snyk.io/vuln/SNYK-ALPINE38-MUSL-458276 Co-authored-by: snyk-bot --- Docker/sidecar/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/sidecar/Dockerfile b/Docker/sidecar/Dockerfile index ad784ba55..5e07ceaf4 100644 --- a/Docker/sidecar/Dockerfile +++ b/Docker/sidecar/Dockerfile @@ -1,4 +1,4 @@ -FROM nginx:1.15.6-alpine +FROM nginx:1-alpine COPY nginx.conf /etc/nginx/nginx.conf COPY uwsgi.conf.template /etc/nginx/gen3.conf.d/uwsgi.conf.template From db6e16cab931ee25a3c92eb7f3161d037e75b7e8 Mon Sep 17 00:00:00 2001 From: Binam Bajracharya <44302895+BinamB@users.noreply.github.com> Date: Mon, 20 May 2024 13:13:11 -0500 Subject: [PATCH 350/362] fix: Docker/nginx-prometheus-exporter-wrapper/Dockerfile to reduce vulnerabilities (#2520) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-ALPINE313-APKTOOLS-1246345 - https://snyk.io/vuln/SNYK-ALPINE313-APKTOOLS-1533754 - https://snyk.io/vuln/SNYK-ALPINE313-OPENSSL-1569448 - https://snyk.io/vuln/SNYK-ALPINE313-OPENSSL-1569448 - https://snyk.io/vuln/SNYK-ALPINE313-ZLIB-2976175 Co-authored-by: snyk-bot --- Docker/nginx-prometheus-exporter-wrapper/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/nginx-prometheus-exporter-wrapper/Dockerfile b/Docker/nginx-prometheus-exporter-wrapper/Dockerfile index 5134ce440..9b883b0ab 100644 --- 
a/Docker/nginx-prometheus-exporter-wrapper/Dockerfile +++ b/Docker/nginx-prometheus-exporter-wrapper/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.14-alpine as build-deps +FROM golang:1.21.8-alpine as build-deps RUN apk update && apk add --no-cache git gcc curl bash From 332f8715b6bc637aae7715e2786a1a25737c27a6 Mon Sep 17 00:00:00 2001 From: Binam Bajracharya <44302895+BinamB@users.noreply.github.com> Date: Mon, 20 May 2024 13:14:43 -0500 Subject: [PATCH 351/362] fix: package.json & package-lock.json to reduce vulnerabilities (#2513) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-JS-EXPRESS-6474509 Co-authored-by: snyk-bot --- package-lock.json | 1089 ++++++++++++++++++++++++++++++++++++--------- package.json | 2 +- 2 files changed, 869 insertions(+), 222 deletions(-) diff --git a/package-lock.json b/package-lock.json index 69c298911..bd0b13589 100644 --- a/package-lock.json +++ b/package-lock.json @@ -13,7 +13,7 @@ "async": "^3.2.2", "aws-sdk": "^2.814.0", "elasticdump": "^6.84.1", - "express": "^4.17.1", + "express": "^4.19.2", "json-schema": "^0.4.0", "minimatch": "^3.0.5", "minimist": "^1.2.6", @@ -32,7 +32,14 @@ "node": ">= 0.6" } }, - "node_modules/ajv": {}, + "node_modules/accepts/node_modules/negotiator": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/ansi-regex": { "version": "6.0.1", "license": "MIT", @@ -47,6 +54,14 @@ "version": "1.1.1", "license": "MIT" }, + "node_modules/asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "dependencies": { + "safer-buffer": "~2.1.0" + } + }, "node_modules/assert-plus": { "version": "1.0.0", "license": 
"MIT", @@ -116,6 +131,14 @@ ], "license": "MIT" }, + "node_modules/bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "dependencies": { + "tweetnacl": "^0.14.3" + } + }, "node_modules/big.js": { "version": "5.2.2", "license": "MIT", @@ -124,12 +147,12 @@ } }, "node_modules/body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "dependencies": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", @@ -137,7 +160,7 @@ "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.11.0", - "raw-body": "2.5.1", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" }, @@ -175,6 +198,42 @@ "node": ">= 0.8" } }, + "node_modules/call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": 
"sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/caseless": { "version": "0.12.0", "license": "Apache-2.0" @@ -204,15 +263,17 @@ } }, "node_modules/content-type": { - "version": "1.0.4", - "license": "MIT", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", "engines": { "node": ">= 0.6" } }, "node_modules/cookie": { - "version": "0.5.0", - "license": "MIT", + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==", "engines": { "node": ">= 0.6" } @@ -221,6 +282,22 @@ "version": "1.0.6", "license": "MIT" }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "node_modules/dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "dependencies": { + "assert-plus": "^1.0.0" + }, + "engines": { + "node": ">=0.10" + } + }, "node_modules/debug": { "version": "2.6.9", "license": "MIT", @@ -228,6 +305,22 @@ "ms": "2.0.0" } }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": 
"https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/delay": { "version": "5.0.0", "license": "MIT", @@ -260,6 +353,15 @@ "npm": "1.2.8000 || >= 1.4.16" } }, + "node_modules/ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "dependencies": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, "node_modules/ee-first": { "version": "1.1.1", "license": "MIT" @@ -343,6 +445,43 @@ "node": ">= 0.8" } }, + "node_modules/es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "dependencies": { + "get-intrinsic": "^1.2.4" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-define-property/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + 
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "engines": { + "node": ">= 0.4" + } + }, "node_modules/escape-html": { "version": "1.0.3", "license": "MIT" @@ -362,16 +501,16 @@ } }, "node_modules/express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": "sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -467,6 +606,16 @@ "resolved": "https://registry.npmjs.org/@types/node/-/node-14.18.34.tgz", "integrity": "sha512-hcU9AIQVHmPnmjRK+XUUYlILlr9pQrsqSrwov/JK1pnf3GTQowVBhx54FbvM0AU/VXGH4i3+vgXS5EguR7fysA==" }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, "node_modules/finalhandler": { "version": "1.2.0", "license": "MIT", @@ -490,6 +639,17 @@ "is-callable": "^1.1.3" } }, + "node_modules/for-each/node_modules/is-callable": { + "version": "1.2.7", + "resolved": 
"https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/forever-agent": { "version": "0.6.1", "license": "Apache-2.0", @@ -524,10 +684,21 @@ } }, "node_modules/function-bind": { - "version": "1.1.1", - "license": "MIT" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "dependencies": { + "assert-plus": "^1.0.0" + } }, - "node_modules/get-intrinsic": {}, "node_modules/gopd": { "version": "1.0.1", "license": "MIT", @@ -538,6 +709,24 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/gopd/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/har-schema": { "version": "2.0.0", "license": "ISC", @@ -556,14 +745,63 @@ "node": ">=6" } }, - "node_modules/has": { + "node_modules/har-validator/node_modules/ajv": { + "version": "6.12.6", + "resolved": 
"https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { "version": "1.0.3", - "license": "MIT", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dependencies": { - "function-bind": "^1.1.1" + "function-bind": "^1.1.2" }, "engines": { - "node": ">= 0.4.0" + "node": ">= 0.4" } }, "node_modules/http-errors": { @@ -593,6 +831,30 @@ "npm": ">=1.3.7" } }, + 
"node_modules/http-signature/node_modules/sshpk": { + "version": "1.18.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "dependencies": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + }, + "bin": { + "sshpk-conv": "bin/sshpk-conv", + "sshpk-sign": "bin/sshpk-sign", + "sshpk-verify": "bin/sshpk-verify" + }, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/http-status": { "version": "1.5.3", "resolved": "https://registry.npmjs.org/http-status/-/http-status-1.5.3.tgz", @@ -603,7 +865,8 @@ }, "node_modules/iconv-lite": { "version": "0.4.24", - "license": "MIT", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "dependencies": { "safer-buffer": ">= 2.1.2 < 3" }, @@ -626,7 +889,6 @@ "node": ">=10" } }, - "node_modules/ip-address": {}, "node_modules/ipaddr.js": { "version": "1.9.1", "license": "MIT", @@ -634,7 +896,6 @@ "node": ">= 0.10" } }, - "node_modules/is-callable": {}, "node_modules/is-typedarray": { "version": "1.0.0", "license": "MIT" @@ -655,11 +916,21 @@ "node": ">= 0.6.0" } }, + "node_modules/jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" + }, "node_modules/json-schema": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, + "node_modules/json-schema-traverse": { + "version": 
"0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, "node_modules/json-stringify-safe": { "version": "5.0.1", "license": "ISC" @@ -742,7 +1013,8 @@ }, "node_modules/media-typer": { "version": "0.3.0", - "license": "MIT", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==", "engines": { "node": ">= 0.6" } @@ -775,7 +1047,6 @@ "node": ">=4" } }, - "node_modules/mime-db": {}, "node_modules/mime-types": { "version": "2.1.35", "license": "MIT", @@ -786,6 +1057,14 @@ "node": ">= 0.6" } }, + "node_modules/mime-types/node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/minimatch": { "version": "3.1.2", "license": "ISC", @@ -808,7 +1087,6 @@ "version": "2.0.0", "license": "MIT" }, - "node_modules/negotiator": {}, "node_modules/oauth-sign": { "version": "0.9.0", "license": "Apache-2.0", @@ -816,6 +1094,14 @@ "node": "*" } }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/on-finished": { "version": "2.4.1", "license": "MIT", @@ -878,6 +1164,11 @@ "version": "2.1.0", "license": "MIT" }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", 
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, "node_modules/proxy-addr": { "version": "2.0.7", "license": "MIT", @@ -889,7 +1180,6 @@ "node": ">= 0.10" } }, - "node_modules/psl": {}, "node_modules/punycode": { "version": "2.1.1", "license": "MIT", @@ -911,63 +1201,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/qs/node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/qs/node_modules/get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/qs/node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/qs/node_modules/object-inspect": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", - "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - 
"node_modules/qs/node_modules/side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "dependencies": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/querystring": { "version": "0.2.0", "engines": { @@ -982,8 +1215,9 @@ } }, "node_modules/raw-body": { - "version": "2.5.1", - "license": "MIT", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "dependencies": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -994,7 +1228,6 @@ "node": ">= 0.8" } }, - "node_modules/readable-stream": {}, "node_modules/request": { "version": "2.88.2", "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", @@ -1066,6 +1299,25 @@ "readable-stream": "^2.3.0" } }, + "node_modules/s3-stream-upload/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/s3-stream-upload/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, "node_modules/s3signed": { "version": "0.1.0", "license": "ISC", @@ -1107,7 
+1359,8 @@ }, "node_modules/safer-buffer": { "version": "2.1.2", - "license": "MIT" + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "node_modules/sax": { "version": "1.2.1", @@ -1160,10 +1413,79 @@ "node": ">= 0.8.0" } }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/set-function-length/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/setprototypeof": { "version": "1.2.0", "license": "ISC" }, + "node_modules/side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "dependencies": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } 
+ }, + "node_modules/side-channel/node_modules/get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/socks5-client": { "version": "1.2.8", "license": "MIT", @@ -1174,6 +1496,24 @@ "node": ">= 6.4.0" } }, + "node_modules/socks5-client/node_modules/ip-address": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz", + "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==", + "dependencies": { + "jsbn": "1.1.0", + "lodash": "^4.17.15", + "sprintf-js": "1.1.2" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/socks5-client/node_modules/jsbn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==" + }, "node_modules/socks5-http-client": { "version": "1.0.4", "license": "MIT", @@ -1194,7 +1534,11 @@ "node": ">= 6.4.0" } }, - "node_modules/sshpk": {}, + "node_modules/sprintf-js": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", + "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==" + }, "node_modules/statuses": { "version": "2.0.1", "license": "MIT", @@ -1202,6 +1546,19 @@ "node": ">= 0.8" } }, + "node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": 
"sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/string_decoder/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, "node_modules/through": { "version": "2.3.8", "license": "MIT" @@ -1224,6 +1581,11 @@ "node": ">=0.8" } }, + "node_modules/tough-cookie/node_modules/psl": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", + "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==" + }, "node_modules/tunnel-agent": { "version": "0.6.0", "license": "Apache-2.0", @@ -1234,9 +1596,15 @@ "node": "*" } }, + "node_modules/tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" + }, "node_modules/type-is": { "version": "1.6.18", - "license": "MIT", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "dependencies": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -1252,6 +1620,14 @@ "node": ">= 0.8" } }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dependencies": { + "punycode": "^2.1.0" + } + }, "node_modules/url": { "version": "0.10.3", "license": "MIT", @@ -1276,6 +1652,11 @@ "which-typed-array": "^1.1.2" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, "node_modules/util/node_modules/available-typed-arrays": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", @@ -1287,42 +1668,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/util/node_modules/call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "dependencies": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/util/node_modules/get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "dependencies": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/util/node_modules/has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/util/node_modules/has-tostringtag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", @@ -1463,15 +1808,29 @@ "requires": { "mime-types": "~2.1.34", "negotiator": "0.6.3" + }, + "dependencies": { + "negotiator": { + "version": "0.6.3", + "resolved": 
"https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", + "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==" + } } }, - "ajv": {}, "ansi-regex": { "version": "6.0.1" }, "array-flatten": { "version": "1.1.1" }, + "asn1": { + "version": "0.2.6", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.6.tgz", + "integrity": "sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==", + "requires": { + "safer-buffer": "~2.1.0" + } + }, "assert-plus": { "version": "1.0.0" }, @@ -1512,16 +1871,24 @@ "base64-js": { "version": "1.5.1" }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==", + "requires": { + "tweetnacl": "^0.14.3" + } + }, "big.js": { "version": "5.2.2" }, "body-parser": { - "version": "1.20.1", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.1.tgz", - "integrity": "sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==", + "version": "1.20.2", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.2.tgz", + "integrity": "sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==", "requires": { "bytes": "3.1.2", - "content-type": "~1.0.4", + "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", @@ -1529,7 +1896,7 @@ "iconv-lite": "0.4.24", "on-finished": "2.4.1", "qs": "6.11.0", - "raw-body": "2.5.1", + "raw-body": "2.5.2", "type-is": "~1.6.18", "unpipe": "1.0.0" } @@ -1557,6 +1924,32 @@ "bytes": { "version": "3.1.2" }, + "call-bind": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz", + "integrity": 
"sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==", + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.1" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, "caseless": { "version": "0.12.0" }, @@ -1576,20 +1969,47 @@ } }, "content-type": { - "version": "1.0.4" + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", + "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==" }, "cookie": { - "version": "0.5.0" + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==" }, "cookie-signature": { "version": "1.0.6" }, + "core-util-is": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==" + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==", + "requires": { + "assert-plus": "^1.0.0" + } + }, "debug": { "version": "2.6.9", "requires": { "ms": "2.0.0" } }, + "define-data-property": { + "version": "1.1.4", + "resolved": 
"https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "requires": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + } + }, "delay": { "version": "5.0.0" }, @@ -1602,6 +2022,15 @@ "destroy": { "version": "1.2.0" }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==", + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, "ee-first": { "version": "1.1.1" }, @@ -1668,6 +2097,33 @@ "encodeurl": { "version": "1.0.2" }, + "es-define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz", + "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==", + "requires": { + "get-intrinsic": "^1.2.4" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, + "es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==" + }, "escape-html": { "version": "1.0.3" }, @@ -1678,16 +2134,16 @@ "version": "1.1.1" }, "express": { - "version": "4.18.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.18.2.tgz", - "integrity": 
"sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==", + "version": "4.19.2", + "resolved": "https://registry.npmjs.org/express/-/express-4.19.2.tgz", + "integrity": "sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==", "requires": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.1", + "body-parser": "1.20.2", "content-disposition": "0.5.4", "content-type": "~1.0.4", - "cookie": "0.5.0", + "cookie": "0.6.0", "cookie-signature": "1.0.6", "debug": "2.6.9", "depd": "2.0.0", @@ -1770,6 +2226,16 @@ } } }, + "fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + }, + "fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" + }, "finalhandler": { "version": "1.2.0", "requires": { @@ -1786,6 +2252,13 @@ "version": "0.3.3", "requires": { "is-callable": "^1.1.3" + }, + "dependencies": { + "is-callable": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==" + } } }, "forever-agent": { @@ -1806,13 +2279,36 @@ "version": "0.5.2" }, "function-bind": { - "version": "1.1.1" + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==" + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + 
"integrity": "sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==", + "requires": { + "assert-plus": "^1.0.0" + } }, - "get-intrinsic": {}, "gopd": { "version": "1.0.1", "requires": { "get-intrinsic": "^1.1.3" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } } }, "har-schema": { @@ -1823,12 +2319,45 @@ "requires": { "ajv": "^6.12.3", "har-schema": "^2.0.0" + }, + "dependencies": { + "ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "requires": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + } + } } }, - "has": { + "has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "requires": { + "es-define-property": "^1.0.0" + } + }, + "has-proto": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.3.tgz", + "integrity": "sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==" + }, + "has-symbols": { "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" + }, + "hasown": { + 
"version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "requires": { - "function-bind": "^1.1.1" + "function-bind": "^1.1.2" } }, "http-errors": { @@ -1847,6 +2376,24 @@ "assert-plus": "^1.0.0", "jsprim": "^1.2.2", "sshpk": "^1.7.0" + }, + "dependencies": { + "sshpk": { + "version": "1.18.0", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.18.0.tgz", + "integrity": "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ==", + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + } } }, "http-status": { @@ -1856,6 +2403,8 @@ }, "iconv-lite": { "version": "0.4.24", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", "requires": { "safer-buffer": ">= 2.1.2 < 3" } @@ -1869,11 +2418,9 @@ "ini": { "version": "2.0.0" }, - "ip-address": {}, "ipaddr.js": { "version": "1.9.1" }, - "is-callable": {}, "is-typedarray": { "version": "1.0.0" }, @@ -1888,11 +2435,21 @@ "resolved": "https://registry.npmjs.org/jmespath/-/jmespath-0.16.0.tgz", "integrity": "sha512-9FzQjJ7MATs1tSpnco1K6ayiYE3figslrXA72G2HQ/n76RzvYlofyi5QM+iX4YRs/pu3yzxlVQSST23+dMDknw==" }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha512-UVU9dibq2JcFWxQPA6KCqj5O42VOmAY3zQUfEKxU0KpTGXwNoCjkX1e13eHNvw/xPynt6pU0rZ1htjWTNTSXsg==" + }, "json-schema": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz", "integrity": 
"sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==" }, + "json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" + }, "json-stringify-safe": { "version": "5.0.1" }, @@ -1950,7 +2507,9 @@ "integrity": "sha512-RicKUuLwZVNZ6ZdJHgIZnSeA05p8qWc5NW0uR96mpPIjN9WDLUg9+kj1esQU1GkPn9iLZVKatSQK5gyiaFHgJA==" }, "media-typer": { - "version": "0.3.0" + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", + "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==" }, "merge-descriptors": { "version": "1.0.1" @@ -1964,11 +2523,17 @@ "mime": { "version": "1.6.0" }, - "mime-db": {}, "mime-types": { "version": "2.1.35", "requires": { "mime-db": "1.52.0" + }, + "dependencies": { + "mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==" + } } }, "minimatch": { @@ -1985,10 +2550,14 @@ "ms": { "version": "2.0.0" }, - "negotiator": {}, "oauth-sign": { "version": "0.9.0" }, + "object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==" + }, "on-finished": { "version": "2.4.1", "requires": { @@ -2029,6 +2598,11 @@ "performance-now": { "version": "2.1.0" }, + "process-nextick-args": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": 
"sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==" + }, "proxy-addr": { "version": "2.0.7", "requires": { @@ -2036,7 +2610,6 @@ "ipaddr.js": "1.9.1" } }, - "psl": {}, "punycode": { "version": "2.1.1" }, @@ -2046,47 +2619,6 @@ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==", "requires": { "side-channel": "^1.0.4" - }, - "dependencies": { - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - } - }, - "has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" - }, - "object-inspect": { - "version": "1.12.2", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.12.2.tgz", - "integrity": "sha512-z+cPxW0QGUp0mcqcsgQyLVRDoXFQbXOwBaqyF7VIgI4TWNQsDHrBpUQslRmIfAoYWdYzs6UlKJtB2XJpTaNSpQ==" - }, - "side-channel": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", - "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", - "requires": { - "call-bind": "^1.0.0", - "get-intrinsic": "^1.0.2", - "object-inspect": "^1.9.0" - } - } } }, "querystring": { @@ -2096,7 +2628,9 @@ "version": "1.2.1" }, 
"raw-body": { - "version": "2.5.1", + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", + "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", "requires": { "bytes": "3.1.2", "http-errors": "2.0.0", @@ -2104,7 +2638,6 @@ "unpipe": "1.0.0" } }, - "readable-stream": {}, "request": { "version": "2.88.2", "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", @@ -2158,6 +2691,27 @@ "requires": { "buffer-queue": "~1.0.0", "readable-stream": "^2.3.0" + }, + "dependencies": { + "readable-stream": { + "version": "2.3.8", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + } } }, "s3signed": { @@ -2177,7 +2731,9 @@ "version": "5.2.1" }, "safer-buffer": { - "version": "2.1.2" + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" }, "sax": { "version": "1.2.1" @@ -2219,13 +2775,82 @@ "send": "0.18.0" } }, + "set-function-length": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "requires": { + "define-data-property": 
"^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, "setprototypeof": { "version": "1.2.0" }, + "side-channel": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz", + "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==", + "requires": { + "call-bind": "^1.0.7", + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.4", + "object-inspect": "^1.13.1" + }, + "dependencies": { + "get-intrinsic": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz", + "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==", + "requires": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + } + } + } + }, "socks5-client": { "version": "1.2.8", "requires": { "ip-address": "~6.1.0" + }, + "dependencies": { + "ip-address": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ip-address/-/ip-address-6.1.0.tgz", + "integrity": "sha512-u9YYtb1p2fWSbzpKmZ/b3QXWA+diRYPxc2c4y5lFB/MMk5WZ7wNZv8S3CFcIGVJ5XtlaCAl/FQy/D3eQ2XtdOA==", + "requires": { + "jsbn": "1.1.0", + "lodash": "^4.17.15", + "sprintf-js": "1.1.2" + } + }, + "jsbn": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-1.1.0.tgz", + "integrity": 
"sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==" + } } }, "socks5-http-client": { @@ -2240,10 +2865,29 @@ "socks5-client": "~1.2.3" } }, - "sshpk": {}, + "sprintf-js": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.2.tgz", + "integrity": "sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==" + }, "statuses": { "version": "2.0.1" }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + }, + "dependencies": { + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + } + } + }, "through": { "version": "2.3.8" }, @@ -2255,6 +2899,13 @@ "requires": { "psl": "^1.1.28", "punycode": "^2.1.1" + }, + "dependencies": { + "psl": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.9.0.tgz", + "integrity": "sha512-E/ZsdU4HLs/68gYzgGTkMicWTLPdAftJLfJFlLUAAKZGkStNU72sZjT66SnMDVOfOWY/YAoiD7Jxa9iHvngcag==" + } } }, "tunnel-agent": { @@ -2263,8 +2914,15 @@ "safe-buffer": "^5.0.1" } }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==" + }, "type-is": { "version": "1.6.18", + "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", + "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==", "requires": { "media-typer": "0.3.0", "mime-types": "~2.1.24" @@ -2273,6 +2931,14 @@ 
"unpipe": { "version": "1.0.0" }, + "uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "requires": { + "punycode": "^2.1.0" + } + }, "url": { "version": "0.10.3", "requires": { @@ -2302,30 +2968,6 @@ "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz", "integrity": "sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==" }, - "call-bind": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", - "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", - "requires": { - "function-bind": "^1.1.1", - "get-intrinsic": "^1.0.2" - } - }, - "get-intrinsic": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.1.3.tgz", - "integrity": "sha512-QJVz1Tj7MS099PevUG5jvnt9tSkXN8K14dxQlikJuPt4uD9hHAHjLyLBiLR5zELelBdD9QNRAXZzsJx0WaDL9A==", - "requires": { - "function-bind": "^1.1.1", - "has": "^1.0.3", - "has-symbols": "^1.0.3" - } - }, - "has-symbols": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", - "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==" - }, "has-tostringtag": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.0.tgz", @@ -2378,6 +3020,11 @@ } } }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, "utils-merge": { "version": "1.0.1" }, diff --git a/package.json b/package.json index fd2761a59..716d9a161 100644 --- 
a/package.json +++ b/package.json @@ -11,7 +11,7 @@ "async": "^3.2.2", "aws-sdk": "^2.814.0", "elasticdump": "^6.84.1", - "express": "^4.17.1", + "express": "^4.19.2", "json-schema": "^0.4.0", "minimatch": "^3.0.5", "minimist": "^1.2.6", From a1092f6b3827fae8ff996ff4fa1b05c03ac3427b Mon Sep 17 00:00:00 2001 From: Binam Bajracharya <44302895+BinamB@users.noreply.github.com> Date: Mon, 20 May 2024 13:16:04 -0500 Subject: [PATCH 352/362] fix: Docker/python-nginx/python2.7-alpine3.7/Dockerfile to reduce vulnerabilities (#2522) The following vulnerabilities are fixed with an upgrade: - https://snyk.io/vuln/SNYK-ALPINE37-MUSL-458286 - https://snyk.io/vuln/SNYK-ALPINE37-MUSL-458286 Co-authored-by: snyk-bot --- Docker/python-nginx/python2.7-alpine3.7/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docker/python-nginx/python2.7-alpine3.7/Dockerfile b/Docker/python-nginx/python2.7-alpine3.7/Dockerfile index 651bc1e7e..c4a934df5 100644 --- a/Docker/python-nginx/python2.7-alpine3.7/Dockerfile +++ b/Docker/python-nginx/python2.7-alpine3.7/Dockerfile @@ -1,6 +1,6 @@ # python2.7 microservice base image -FROM alpine:3.7 +FROM alpine:3.16.9 ENV DEBIAN_FRONTEND=noninteractive From 4c93873a7e0f7d4808b1de3de0d970d0e13d9fc4 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 20 May 2024 13:17:05 -0500 Subject: [PATCH 353/362] OpenVPN Updates (#2492) * Add ubuntu20 vpn scripts * Try to add support for AL23 vpn * Try to add support for AL23 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Try to add support for AL2 vpn * Add whitelist for snap to work * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 * Add support for openvpn on AL2 --- .../openvpn_management_scripts/create_ovpn.sh | 4 +- .../create_seperated_vpn_zip.sh | 5 +- .../create_vpn_user.sh | 13 +- .../install_ovpn.sh | 14 +- .../reset_totp_token.sh | 12 +- .../openvpn_management_scripts/revoke_user.sh | 13 +- .../openvpn_management_scripts/send_email.sh | 2 +- .../templates/network_tweaks.sh.template | 2 + .../templates/openvpn.conf.template | 16 +- .../templates/settings.sh.template | 5 +- .../templates/vars.template | 92 +-- flavors/vpn_nlb_central/vpnvm_new.sh | 533 ++++++++++++++++++ 12 files changed, 598 insertions(+), 113 deletions(-) create mode 100644 flavors/vpn_nlb_central/vpnvm_new.sh diff --git a/files/openvpn_management_scripts/create_ovpn.sh b/files/openvpn_management_scripts/create_ovpn.sh index 4e6ba7bf5..4d351464b 100755 --- 
a/files/openvpn_management_scripts/create_ovpn.sh +++ b/files/openvpn_management_scripts/create_ovpn.sh @@ -29,8 +29,8 @@ set -e set -u -USER_CERT_PATH="$KEY_PATH/$1.crt" -USER_KEY_PATH="$KEY_PATH/$1.key" +USER_CERT_PATH="$KEY_PATH/issued/$1.crt" +USER_KEY_PATH="$KEY_PATH/private/$1.key" #HEADER diff --git a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh index 1794a3b69..c7ac6ce3a 100755 --- a/files/openvpn_management_scripts/create_seperated_vpn_zip.sh +++ b/files/openvpn_management_scripts/create_seperated_vpn_zip.sh @@ -30,8 +30,8 @@ username=${username// /_} # now, clean out anything that's not alphanumeric or an underscore username=${username//[^a-zA-Z0-9_-.]/} -USER_CERT_PATH="$KEY_PATH/$1.crt" -USER_KEY_PATH="$KEY_PATH/$1.key" +USER_CERT_PATH="$KEY_PATH/issued/$1.crt" +USER_KEY_PATH="$KEY_PATH/private/$1.key" #make a temp dir TEMP_NAME="$username-$CLOUD_NAME-seperated" @@ -47,6 +47,7 @@ cp $USER_KEY_PATH $TEMP_DIR/client.key #This is because EXTHOST is a defined variable in the template while read r; do eval echo $r; done < $TEMPLATE_DIR/client_ovpn_seperate.settings >> $TEMP_DIR/${username}-${CLOUD_NAME}.ovpn +mkdir -p $KEY_DIR/ovpn_files_seperated tar -C $TEMP_DIR/../ -zcvf $KEY_DIR/ovpn_files_seperated/${username}-${CLOUD_NAME}-seperated.tgz $TEMP_NAME echo -e "Exiting ${BOLD}$_${CLEAR}" diff --git a/files/openvpn_management_scripts/create_vpn_user.sh b/files/openvpn_management_scripts/create_vpn_user.sh index 2f3ef406b..39be17fcb 100755 --- a/files/openvpn_management_scripts/create_vpn_user.sh +++ b/files/openvpn_management_scripts/create_vpn_user.sh @@ -49,13 +49,16 @@ export KEY_EMAIL=$email export KEY_ALTNAMES="DNS:${KEY_CN}" #This create the key's for the road warrior -echo -e "running ${YELLOW} build-batch-key" -build-key-batch $username &>/dev/null && echo -e "${GREEN}success!" 
|| echo -e "${RED}failure";echo -e $CLEAR +echo -e "running ${YELLOW} easyrsa build-client-full" +( + cd $EASYRSA_PATH + easyrsa build-client-full $username nopass &>/dev/null && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR +) #&& echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR -echo "Backup certs so we can revoke them if ever needed" -[ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/ -cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR +# echo "Backup certs so we can revoke them if ever needed" +# [ -d $KEY_DIR/user_certs/ ] || mkdir $KEY_DIR/user_certs/ +# cp $KEY_DIR/$username.crt $KEY_DIR/user_certs/$username.crt-$(date +%F-%T) && echo -e "${GREEN}success!" || echo -e "${RED}failure";echo -e $CLEAR echo "Create the OVPN file for $username" $VPN_BIN_ROOT/create_ovpn.sh $KEY_CN $KEY_EMAIL > $KEY_DIR/ovpn_files/${username}-${CLOUD_NAME}.ovpn 2> /dev/null && echo -e "${GREEN}success!" 
|| echo -e "${RED}failure";echo -e $CLEAR diff --git a/files/openvpn_management_scripts/install_ovpn.sh b/files/openvpn_management_scripts/install_ovpn.sh index 795ac17f2..4250d2ca2 100644 --- a/files/openvpn_management_scripts/install_ovpn.sh +++ b/files/openvpn_management_scripts/install_ovpn.sh @@ -12,13 +12,13 @@ VARS_PATH="$EASYRSA_PATH/vars" #EASY-RSA Vars - KEY_SIZE=4096 - COUNTRY="US" - STATE="IL" - CITY="Chicago" - ORG="CDIS" - EMAIL='support\@datacommons.io' - KEY_EXPIRE=365 +KEY_SIZE=4096 +COUNTRY="US" +STATE="IL" +CITY="Chicago" +ORG="CDIS" +EMAIL='support\@datacommons.io' +KEY_EXPIRE=365 #OpenVPN diff --git a/files/openvpn_management_scripts/reset_totp_token.sh b/files/openvpn_management_scripts/reset_totp_token.sh index b844af8f2..e937876a2 100755 --- a/files/openvpn_management_scripts/reset_totp_token.sh +++ b/files/openvpn_management_scripts/reset_totp_token.sh @@ -40,11 +40,15 @@ update_password_file() { } generate_qr_code() { - uuid=$(uuidgen) - qrcode_out=/var/www/qrcode/${uuid}.svg + mkdir -p /etc/openvpn/pki/qrcodes + qrcode_out=/etc/openvpn/pki/qrcodes/${vpn_username}.png string=$( python -c "import pyotp; print( pyotp.totp.TOTP('$totp_secret').provisioning_uri('$vpn_username', issuer_name='$CLOUD_NAME') )" ) - $( python -c "import pyqrcode; pyqrcode.create('$string').svg('${qrcode_out}', scale=8)" ) - vpn_creds_url="https://${FQDN}/$uuid.svg" + $( python -c "import qrcode; qrcode.make('$string').save('${qrcode_out}')" ) + # vpn_creds_url="https://${FQDN}/$uuid.svg" + s3Path="s3://${S3BUCKET}/qrcodes/${vpn_username}.png" + aws s3 cp ${qrcode_out} ${s3Path} + signedUrl="$(aws s3 presign "$s3Path" --expires-in "$((60*60*48))")" + vpn_creds_url=${signedUrl} } print_info() { diff --git a/files/openvpn_management_scripts/revoke_user.sh b/files/openvpn_management_scripts/revoke_user.sh index 0ffe5c364..89d102f38 100755 --- a/files/openvpn_management_scripts/revoke_user.sh +++ b/files/openvpn_management_scripts/revoke_user.sh @@ -25,18 +25,15 @@ set 
-e username=${1} -#Source the settings for EASY RSA -source $EASYRSA_PATH/vars #Override exports export KEY_CN=$username -set +e -#revoke-full $username || echo -e "${RED}${BOLD}${BLINK}FAILED TO REVOKE ${username}${CLEAR}" -revoke-full $username -#Apparently it doesn't exist like I expected, and says failed even when it succeeded. - -set -e +( + cd $EASYRSA_PATH + ./easyrsa revoke $username + ./easyrsa gen-crl +) sed -i "/${username},/d" $USER_PW_FILE || echo -e "${RED}${BOLD}${BLINK}Failed to remove $username from file ${USER_PW_FILE}${CLEAR}" /etc/openvpn/bin/push_to_s3.sh diff --git a/files/openvpn_management_scripts/send_email.sh b/files/openvpn_management_scripts/send_email.sh index 38ec6651a..0686af206 100755 --- a/files/openvpn_management_scripts/send_email.sh +++ b/files/openvpn_management_scripts/send_email.sh @@ -14,7 +14,7 @@ RED="\033[31m" echo -e "Entering ${BOLD}$_${CLEAR}" -S3BUCKET=WHICHVPN +export S3BUCKET=WHICHVPN if [ "${1}" == "" ] then diff --git a/files/openvpn_management_scripts/templates/network_tweaks.sh.template b/files/openvpn_management_scripts/templates/network_tweaks.sh.template index a137a8c6f..1caa8c36a 100644 --- a/files/openvpn_management_scripts/templates/network_tweaks.sh.template +++ b/files/openvpn_management_scripts/templates/network_tweaks.sh.template @@ -14,3 +14,5 @@ iptables -I FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT # Masquerade iptables -t nat -A POSTROUTING -s #VPN_SUBNET# -d #VM_SUBNET# -o $vpnserver_int -j MASQUERADE echo 1 > /proc/sys/net/ipv4/ip_forward + +service iptables save diff --git a/files/openvpn_management_scripts/templates/openvpn.conf.template b/files/openvpn_management_scripts/templates/openvpn.conf.template index d539015fe..7e692113e 100644 --- a/files/openvpn_management_scripts/templates/openvpn.conf.template +++ b/files/openvpn_management_scripts/templates/openvpn.conf.template @@ -10,16 +10,16 @@ persist-key persist-tun #certificates -ca easy-rsa/keys/ca.crt -cert 
easy-rsa/keys/#FQDN#.crt -key easy-rsa/keys/#FQDN#.key # This file should be kept secret -dh easy-rsa/keys/dh4096.pem -tls-auth easy-rsa/keys/ta.key 0 # This file is secret -crl-verify easy-rsa/keys/crl.pem # Revocation files +ca /etc/openvpn/easy-rsa/pki/ca.crt +cert /etc/openvpn/easy-rsa/pki/issued/#FQDN#.crt +key /etc/openvpn/easy-rsa/pki/private/#FQDN#.key # This file should be kept secret +dh /etc/openvpn/easy-rsa/pki/dh.pem +tls-auth /etc/openvpn/easy-rsa/pki/ta.key 0 # This file is secret +crl-verify /etc/openvpn/easy-rsa/pki/crl.pem # Revocation files #Password script -auth-user-pass-verify bin/auth-user-pass-verify.sh via-env -script-security 3 execve +auth-user-pass-verify /etc/openvpn/bin/auth-user-pass-verify.sh via-env +script-security 3 # execve #Cipher suite cipher AES-256-CBC diff --git a/files/openvpn_management_scripts/templates/settings.sh.template b/files/openvpn_management_scripts/templates/settings.sh.template index 2d5f46ef6..c58e8b98c 100644 --- a/files/openvpn_management_scripts/templates/settings.sh.template +++ b/files/openvpn_management_scripts/templates/settings.sh.template @@ -1,6 +1,7 @@ export VPN_SETTINGS_LOADED="1" export CLOUD_NAME='#CLOUD_NAME#' export FQDN="#FQDN#" +export EXTHOST='#CLOUD_NAME#.planx-pla.net' ## EXTHOST is set in the easy-rsa/vars env settings. 
I think these values have to maych so removing from here #sendemail vars @@ -28,7 +29,7 @@ export OPENVPN_MY_BIN="/etc/openvpn/bin" #CDIS OpenVPN scripts contants export TEMPLATE_DIR="/etc/openvpn/bin/templates" -export KEY_PATH="/etc/openvpn/easy-rsa/keys/" +export KEY_PATH="/etc/openvpn/easy-rsa/pki/" export CA_PATH="$KEY_PATH/ca.crt" export TA_KEY_PATH="$KEY_PATH/ta.key" export ARCHIVE_CERT_DIR="$KEY_DIR/user_certs/" @@ -37,6 +38,6 @@ export USER_PW_FILE="/etc/openvpn/user_passwd.csv" export VPN_BIN_ROOT="/etc/openvpn/bin" export VPN_USER_CSV="/etc/openvpn/user_passwd.csv" export VPN_FILE_ATTACHMENTS="-a$VPN_BIN_ROOT/OpenVPN_for_PLANX_Installation_Guide.pdf" - +export KEY_DIR="$EASYRSA_PATH/pki" export PATH=$PATH:$EASYRSA_PATH:$OPENVPN_MY_BIN source /etc/openvpn/bin/.venv/bin/activate diff --git a/files/openvpn_management_scripts/templates/vars.template b/files/openvpn_management_scripts/templates/vars.template index 0afa0c554..311f05605 100644 --- a/files/openvpn_management_scripts/templates/vars.template +++ b/files/openvpn_management_scripts/templates/vars.template @@ -1,81 +1,25 @@ -# easy-rsa parameter settings -export EXTHOST="#EXTHOST#" +# EasyRSA 3 vars file -# NOTE: If you installed from an RPM, -# don't edit this file in place in -# /usr/share/openvpn/easy-rsa -- -# instead, you should copy the whole -# easy-rsa directory to another location -# (such as /etc/openvpn) so that your -# edits will not be wiped out by a future -# OpenVPN package upgrade. +# This is a user-customized vars file for EasyRSA 3. +# Adjust these values to suit your needs. -# This variable should point to -# the top level of the easy-rsa -# tree. -export EASY_RSA="#EASY_RSA_DIR#" +# Key Size - Increase to 2048 if you are paranoid. This affects performance. 
+set_var EASYRSA_KEY_SIZE #KEY_SIZE# -# -# This variable should point to -# the requested executables -# -export OPENSSL="openssl" -export PKCS11TOOL="pkcs11-tool" -export GREP="grep" +# CA and Certificate Expiry - Set these to your desired expiry in days +set_var EASYRSA_CA_EXPIRE 3650 +set_var EASYRSA_CERT_EXPIRE #KEY_EXPIRE# +# Fields for the request Distinguished Name (DN) +# Adjust these to match your organization's information +set_var EASYRSA_REQ_COUNTRY "#COUNTRY#" +set_var EASYRSA_REQ_PROVINCE "#STATE#" +set_var EASYRSA_REQ_CITY "#CITY#" +set_var EASYRSA_REQ_ORG "#ORG#" +set_var EASYRSA_REQ_EMAIL "#EMAIL#" +set_var EASYRSA_REQ_OU "#OU#" -# This variable should point to -# the openssl.cnf file included -# with easy-rsa. -export KEY_CONFIG=`$EASY_RSA/whichopensslcnf $EASY_RSA` -# Edit this variable to point to -# your soon-to-be-created key -# directory. -# -# WARNING: clean-all will do -# a rm -rf on this directory -# so make sure you define -# it correctly! -export KEY_DIR="$EASY_RSA/keys" +set_var EASYRSA_BATCH "1" -# Issue rm -rf warning -echo NOTE: If you run ./clean-all, I will be doing a rm -rf on $KEY_DIR - -# PKCS11 fixes -export PKCS11_MODULE_PATH="dummy" -export PKCS11_PIN="dummy" - -# Increase this to 2048 if you -# are paranoid. This will slow -# down TLS negotiation performance -# as well as the one-time DH parms -# generation process. -export KEY_SIZE=#KEY_SIZE# - -# In how many days should the root CA key expire? -export CA_EXPIRE=3650 - -# In how many days should certificates expire? -export KEY_EXPIRE=#KEY_EXPIRE# - -# These are the default values for fields -# which will be placed in the certificate. -# Don't leave any of these fields blank. 
-export KEY_COUNTRY="#COUNTRY#" -export KEY_PROVINCE="#STATE#" -export KEY_CITY="#CITY#" -export KEY_ORG="#ORG#" -export KEY_EMAIL="#EMAIL#" -export KEY_OU="#OU#" - -# X509 Subject Field -export KEY_NAME="#KEY_NAME#" - -# PKCS11 Smart Card -# export PKCS11_MODULE_PATH="/usr/lib/changeme.so" -# export PKCS11_PIN=1234 - -# If you'd like to sign all keys with the same Common Name, uncomment the KEY_CN export below -# You will also need to make sure your OpenVPN server config has the duplicate-cn option set -# export KEY_CN="CommonName" +# Note: Do not leave any of the fields blank as it may cause the script to fail. diff --git a/flavors/vpn_nlb_central/vpnvm_new.sh b/flavors/vpn_nlb_central/vpnvm_new.sh new file mode 100644 index 000000000..00f8306fc --- /dev/null +++ b/flavors/vpn_nlb_central/vpnvm_new.sh @@ -0,0 +1,533 @@ +#!/bin/bash + +############################################################### +# variables +############################################################### + +MAGIC_URL="http://169.254.169.254/latest/meta-data/" +AVAILABILITY_ZONE=$(curl -s ${MAGIC_URL}placement/availability-zone) +PRIVATE_IPV4=$(curl -s ${MAGIC_URL}local-ipv4) +PUBLIC_IPV4=$(curl -s ${MAGIC_URL}public-ipv4) +REGION=$(echo ${AVAILABILITY_ZONE::-1}) +#DOCKER_DOWNLOAD_URL="https://download.docker.com/linux/ubuntu" +AWSLOGS_DOWNLOAD_URL="https://s3.amazonaws.com/amazoncloudwatch-agent/ubuntu/amd64/latest/amazon-cloudwatch-agent.deb" +#TERRAFORM_DOWNLOAD_URL="https://releases.hashicorp.com/terraform/0.11.15/terraform_0.11.15_linux_amd64.zip" +DISTRO=$(awk -F '[="]*' '/^NAME/ { print $2 }' < /etc/os-release) +if [[ $DISTRO == "Ubuntu" ]]; then + WORK_USER="ubuntu" +else + WORK_USER="ec2-user" +fi +HOME_FOLDER="/home/${WORK_USER}" +SUB_FOLDER="${HOME_FOLDER}/cloud-automation" + +OPENVPN_PATH='/etc/openvpn' +BIN_PATH="${OPENVPN_PATH}/bin" +EASYRSA_PATH="${OPENVPN_PATH}/easy-rsa" +VARS_PATH="${EASYRSA_PATH}/vars" + +#EASY-RSA Vars +KEY_SIZE=4096 +COUNTRY="US" +STATE="IL" +CITY="Chicago" 
+ORG="CTDS" +EMAIL='support\@datacommons.io' +KEY_EXPIRE=365 + +#OpenVPN +PROTO=tcp + + +############################################################### +# get any variables we want coming from terraform variables +############################################################### +if [ $# -eq 0 ]; +then + echo "No arguments supplied, something is wrong" + exit 1 +else + #OIFS=$IFS + echo $1 + IFS=';' read -ra ADDR <<< "$1" + echo ${ADDR[@]} + for i in "${ADDR[@]}"; do + echo $i + if [[ $i = *"cwl_group"* ]]; + then + CWL_GROUP="${CWL_GROUP:-$(echo ${i} | cut -d= -f2)}" + elif [[ ${i} = *"vpn_nlb_name"* ]]; + then + VPN_NLB_NAME="$(echo ${i} | cut -d= -f2)" + elif [[ ${i} = *"cloud_name"* ]]; + then + CLOUD_NAME="$(echo ${i} | cut -d= -f2)" + elif [[ ${i} = *"csoc_vpn_subnet"* ]]; + then + CSOC_VPN_SUBNET="$(echo ${i} | cut -d= -f2)" + elif [[ ${i} = *"csoc_vm_subnet"* ]]; + then + CSOC_VM_SUBNET="$(echo ${i} | cut -d= -f2)" + elif [[ $i = *"account_id"* ]]; + then + ACCOUNT_ID="$(echo ${i} | cut -d= -f2)" + elif [[ $i = *"alternate_cwlg"* ]]; + then + CWL_GROUP="$(echo ${i} | cut -d= -f2)" + fi + done + echo $1 +fi + +S3_BUCKET="vpn-certs-and-files-${VPN_NLB_NAME}" + +function logs_helper(){ + echo -e "****************** ${1} ******************" +} + +function install_basics() { + + logs_helper "Installing Basics" + if [[ $DISTRO == "Ubuntu" ]]; then + apt -y install python3-pip build-essential sipcalc wget curl jq apt-transport-https ca-certificates software-properties-common fail2ban libyaml-dev + apt -y install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools + apt -y install openvpn bridge-utils libssl-dev openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv + # For openVPN + debconf-set-selections <<< "postfix postfix/mailname string planx-pla.net" + debconf-set-selections <<< "postfix postfix/main_mailer_type string 'Internet Site'" + else + amazon-linux-extras install epel + yum -y -q install epel-release 
iptables-services + yum -y -q install python3-pip python3-devel gcc sipcalc wget curl jq ca-certificates software-properties-common fail2ban libyaml-dev + yum -y -q install postfix mailutils python-virtualenv uuid-runtime lighttpd net-tools + yum -y -q install openvpn bridge-utils openssl zlib1g-dev easy-rsa haveged zip mutt sipcalc python-dev python3-venv + fi + pip3 install awscli + useradd --shell /bin/nologin --system openvpn + + logs_helper "Basics installed" +} + + +function configure_basics() { + + logs_helper "Configuring Basics" + + local dest_path="/root/openvpn_management_scripts" + local src_path="${SUB_FOLDER}/files/openvpn_management_scripts" + cp -r ${src_path} /root + + # Different buckets for different CSOC vpn environments + sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/push_to_s3.sh + sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/recover_from_s3.sh + sed -i "s/WHICHVPN/${S3_BUCKET}\/${VPN_NLB_NAME}/" ${dest_path}/send_email.sh + + # Replace the User variable for hostname, VPN subnet and VM subnet + #sed -i "s/SERVERNAME/${VPN_NLB_NAME}/" ${dest_path}/csoc_vpn_user_variable + #sed -i "s/CLOUDNAME/${CLOUD_NAME}/" ${dest_path}/csoc_vpn_user_variable + + #VPN_SUBNET=${CSOC_VPN_SUBNET} + #VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + #VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + #sed -i "s/VPN_SUBNET/$VPN_SUBNET_BASE\/$VPN_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable + + #VM_SUBNET=${CSOC_VM_SUBNET} + #VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + #VM_SUBNET_MASK_BITS=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + #sed -i "s/VM_SUBNET/$VM_SUBNET_BASE\/$VM_SUBNET_MASK_BITS/" ${dest_path}/csoc_vpn_user_variable + + echo "aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh" + 
aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ && ${dest_path}/recover_from_s3.sh + + logs_helper "Copying modified scripts to /etc/openvpn" + cp -vr /root/openvpn_management_scripts /etc/openvpn/ + + logs_helper "Basics configured" + +} + + +function configure_awscli() { + + logs_helper "Configuring AWS" + mkdir -p ${HOME_FOLDER}/.aws + cat < ${HOME_FOLDER}/.aws/config +[default] +output = json +region = us-east-1 + +[profile csoc] +output = json +region = us-east-1 +EOT + + mkdir -p /root/.aws + cat > /root/.aws/config <> ${config_json} < /root/server.pem + fi + + export FQDN=${CLOUD_NAME} + export cloud=${VPN_NLB_NAME} + export SERVER_PEM="/root/server.pem" + export VM_SUBNET=${CSOC_VM_SUBNET} + export VM_SUBNET_BASE=$( sipcalc $VM_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + export VM_SUBNET_MASK=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' ) + export VM_SUBNET_MASK_BITS=$( sipcalc $VM_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + export VPN_SUBNET=${CSOC_VPN_SUBNET} + export VPN_SUBNET_BASE=$( sipcalc $VPN_SUBNET | perl -ne 'm|Host address\s+-\s+(\S+)| && print "$1"') + export VPN_SUBNET_MASK=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask\s+-\s+(\S+)| && print "$1"' ) + export VPN_SUBNET_MASK_BITS=$( sipcalc $VPN_SUBNET | perl -ne 'm|Network mask \(bits\)\s+-\s+(\S+)| && print "$1"' ) + export server_pem="/root/server.pem" + echo "*******" + echo "${FQDN} -- ${cloud} -- ${SERVER_PEM} -- ${VPN_SUBNET} -- ${VPN_SUBNET_BASE} -- ${VPN_SUBNET_MASK_BITS} --/ ${VM_SUBNET} -- ${VM_SUBNET_BASE} -- ${VM_SUBNET_MASK_BITS}" + echo "*******" + #export FQDN="$SERVERNAME.planx-pla.net"; export cloud="$CLOUDNAME"; export SERVER_PEM="/root/server.pem"; + + #cp /etc/openvpn/bin/templates/lighttpd.conf.template /etc/lighttpd/lighttpd.conf + #mkdir -p --mode=750 /var/www/qrcode + #chown openvpn:www-data /var/www/qrcode + #mkdir -p /etc/lighttpd/certs + #cp /root/server.pem 
/etc/lighttpd/certs/server.pem + #service lighttpd restart + + #systemctl restart openvpn + + logs_helper "openVPN init complete" + +} + +function install_easyrsa() { + + logs_helper "Installing easyRSA" + if [[ -f $EASYRSA_PATH/easyrsa ]]; + then + logs_helper "easyRSA already installed" + return + fi + easyRsaVer="3.1.7" + wget https://github.com/OpenVPN/easy-rsa/releases/download/v3.1.7/EasyRSA-${easyRsaVer}.tgz + # extract to a folder called easyrsa + tar xvf EasyRSA-${easyRsaVer}.tgz + mv EasyRSA-${easyRsaVer}/ $EASYRSA_PATH + rm EasyRSA-${easyRsaVer}.tgz + cp "$OPENVPN_PATH/bin/templates/vars.template" $VARS_PATH + +# local easy_rsa_dir="$EASYRSA_PATH" +# local exthost="$FQDN" +# local ou="$cloud" +# local key_name="$ou-OpenVPN" + + perl -p -i -e "s|#EASY_RSA_DIR#|${EASYRSA_PATH}|" $VARS_PATH + perl -p -i -e "s|#EXTHOST#|${FQDN}|" $VARS_PATH + perl -p -i -e "s|#KEY_SIZE#|${KEY_SIZE}|" $VARS_PATH + perl -p -i -e "s|#COUNTRY#|${COUNTRY}|" $VARS_PATH + perl -p -i -e "s|#STATE#|${STATE}|" $VARS_PATH + perl -p -i -e "s|#CITY#|${CITY}|" $VARS_PATH + perl -p -i -e "s|#ORG#|${ORG}|" $VARS_PATH + perl -p -i -e "s|#EMAIL#|${EMAIL}|" $VARS_PATH + perl -p -i -e "s|#OU#|${cloud}|" $VARS_PATH + perl -p -i -e "s|#KEY_NAME#|${cloud}-OpenVPN|" $VARS_PATH + perl -p -i -e "s|#KEY_EXPIRE#|${KEY_EXPIRE}|" $VARS_PATH + + sed -i 's/^subjectAltName/#subjectAltName/' $EASYRSA_PATH/openssl-*.cnf + logs_helper "easyRSA complete" +} + +function install_custom_scripts() { + + logs_helper "installing custom scripts" + cd $OPENVPN_PATH + + #pull our openvpn scripts + #cp -r /root/openvpn_management_scripts /etc/openvpn/ + ln -sfn openvpn_management_scripts bin + cd $BIN_PATH + python3 -m venv .venv + #virtualenv .venv + #This is needed or else you get : .venv/bin/activate: line 57: PS1: unbound variable + set +u + # ( source .venv/bin/activate; pip install pyotp pyqrcode bcrypt ) + ( source .venv/bin/activate; pip3 install pyotp qrcode bcrypt ) + set -u + + logs_helper "custom scripts 
done" +} + +install_settings() { + + logs_helper "installing settings" + SETTINGS_PATH="$BIN_PATH/settings.sh" + cp "$OPENVPN_PATH/bin/templates/settings.sh.template" "$SETTINGS_PATH" + perl -p -i -e "s|#FQDN#|$FQDN|" $SETTINGS_PATH + perl -p -i -e "s|#EMAIL#|$EMAIL|" $SETTINGS_PATH + perl -p -i -e "s|#CLOUD_NAME#|${cloud}|" $SETTINGS_PATH + + logs_helper "settings installed" +} + +build_PKI() { + + logs_helper "building pki" + cd $EASYRSA_PATH + # ln -s openssl-1.0.0.cnf openssl.cnf + echo "This is long" + # ./easyrsa clean-all nopass + ./easyrsa init-pki + ./easyrsa build-ca nopass + ./easyrsa gen-dh + ./easyrsa gen-crl + ./easyrsa build-server-full $CLOUD_NAME nopass + # ./easyrsa gen-req $VPN_NLB_NAME.planx-pla.net nopass + openvpn --genkey --secret ta.key + mv ta.key $EASYRSA_PATH/pki/ta.key + + #This will error but thats fine, the crl.pem was created (without it openvpn server crashes) + set +e + ./revoke-full client &>/dev/null || true + set -e + logs_helper "pki done" + +} + +configure_ovpn() { + + logs_helper "configuring openvpn" + if [[ $DISTRO == "Ubuntu" ]]; then + OVPNCONF_PATH="/etc/openvpn/openvpn.conf" + else + OVPNCONF_PATH="/etc/openvpn/server/server.conf" + fi + cp "$OPENVPN_PATH/bin/templates/openvpn.conf.template" "$OVPNCONF_PATH" + + perl -p -i -e "s|#FQDN#|$FQDN|" $OVPNCONF_PATH + + perl -p -i -e "s|#VPN_SUBNET_BASE#|$VPN_SUBNET_BASE|" $OVPNCONF_PATH + perl -p -i -e "s|#VPN_SUBNET_MASK#|$VPN_SUBNET_MASK|" $OVPNCONF_PATH + + perl -p -i -e "s|#VM_SUBNET_BASE#|$VM_SUBNET_BASE|" $OVPNCONF_PATH + perl -p -i -e "s|#VM_SUBNET_MASK#|$VM_SUBNET_MASK|" $OVPNCONF_PATH + + perl -p -i -e "s|#PROTO#|$PROTO|" $OVPNCONF_PATH + + if [[ $DISTRO == "Ubuntu" ]]; then + systemctl restart openvpn + else + systemctl enable openvpn-server@server + systemctl start openvpn-server@server + fi + + logs_helper "openvpn configured" +} + +tweak_network() { + + logs_helper "tweaking network" + local nettweaks_path="$OPENVPN_PATH/bin/network_tweaks.sh" + cp 
"$OPENVPN_PATH/bin/templates/network_tweaks.sh.template" "${nettweaks_path}" + perl -p -i -e "s|#VPN_SUBNET#|$VPN_SUBNET|" ${nettweaks_path} + perl -p -i -e "s|#VM_SUBNET#|$VM_SUBNET|" ${nettweaks_path} + perl -p -i -e "s|#PROTO#|$PROTO|" ${nettweaks_path} + + chmod +x ${nettweaks_path} + ${nettweaks_path} + + # Disable firewall in amazonlinux + systemctl stop firewalld + systemctl disable firewalld + + #cp /etc/rc.local /etc/rc.local.bak + #sed -i 's/^exit/#exit/' /etc/rc.local + #echo /etc/openvpn/bin/network_tweaks.sh >> /etc/rc.local + #echo exit 0 >> /etc/rc.local + + + logs_helper "network tweaked" + +} + +install_webserver() { + + + logs_helper "installing webserver" + #Webserver used for QRCodes + if [[ $DISTRO == "Ubuntu" ]]; then + apt -y install lighttpd + else + yum -y install lighttpd + fi + cp "$OPENVPN_PATH/bin/templates/lighttpd.conf.template" /etc/lighttpd/lighttpd.conf + + mkdir -p --mode=750 /var/www/qrcode + chown openvpn:www-data /var/www/qrcode + + if [ -f $SERVER_PEM ] + then + mkdir --mode=700 /etc/lighttpd/certs + cp $SERVER_PEM /etc/lighttpd/certs/server.pem + service lighttpd restart + fi + + logs_helper "webserver installed" +} + + +install_cron() { + cp "$OPENVPN_PATH/bin/templates/cron.template" /etc/cron.d/openvpn +} + +misc() { + + logs_helper "installing misc" + cd $OPENVPN_PATH + mkdir -p easy-rsa/pki/ovpn_files + ln -sfn easy-rsa/pki/ovpn_files + + #If openvpn fails to start its cause perms. Init needs root rw to start, but service needs openvpn rw to work + mkdir --mode 775 -p clients.d/ + mkdir --mode 775 -p clients.d/tmp/ + chown root:openvpn clients.d/tmp/ + + mkdir -p easy-rsa/pki/ovpn_files_seperated/ + mkdir -p easy-rsa/pki/ovpn_files_systemd/ + mkdir -p easy-rsa/pki/ovpn_files_resolvconf/ + + touch user_passwd.csv + + mkdir -p environments + mkdir -p client-restrictions + + chown -R openvpn:openvpn easy-rsa/ user_passwd.csv clients.d/tmp/ + #ahhem. 
+ chown :root /etc/openvpn/clients.d/tmp + chmod g+rwx /etc/openvpn/clients.d/tmp + # systemctl restart openvpn + + logs_helper "misc done" +} + +function main() { + install_basics + configure_awscli + configure_basics + + if [[ $DISTRO == "Ubuntu" ]]; then + install_awslogs + fi + install_openvpn + + set -e + set -u + install_custom_scripts + # if [! -d "/etc/openvpn/easy-rsa"]; then + aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || install_easyrsa + + install_settings + + # if [! -d "/etc/openvpn/easy-rsa"]; then + aws s3 ls s3://${S3_BUCKET}/${VPN_NLB_NAME}/ || build_PKI + #fi + misc + configure_ovpn + tweak_network + + install_cron + + + mkdir -p --mode=750 /var/www/qrcode + + logs_helper "openvpn setup complete" + +} + +main From c191d74087aca3b004b7448da51f98ce996c2669 Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 20 May 2024 14:20:33 -0400 Subject: [PATCH 354/362] Adding a check to prevent dropping the Argo database when we reset (#2437) --- gen3/bin/reset.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/gen3/bin/reset.sh b/gen3/bin/reset.sh index 6dac0ea16..045da4319 100644 --- a/gen3/bin/reset.sh +++ b/gen3/bin/reset.sh @@ -137,8 +137,12 @@ sleep 30 # for serviceName in $(gen3 db services); do if [[ "$serviceName" != "peregrine" ]]; then # sheepdog and peregrine share the same db - # --force will also drop connections to the database to ensure database gets dropped - gen3 db reset "$serviceName" --force + if [[ "$serviceName" != "argo" ]]; then + # --force will also drop connections to the database to ensure database gets dropped + gen3 db reset "$serviceName" --force + else + echo "Skipping the Argo DB reset, as that will delete archived workflows." + fi fi done From a4d7b2fd9556a229015c14dfbf442dcbee83b0b2 Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 20 May 2024 13:21:18 -0500 Subject: [PATCH 355/362] Add CUR access IAM policy to hatchery (#2401) --- gen3/bin/kube-setup-hatchery.sh | 41 +++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/gen3/bin/kube-setup-hatchery.sh b/gen3/bin/kube-setup-hatchery.sh index bdcff8ed0..dadbbd930 100644 --- a/gen3/bin/kube-setup-hatchery.sh +++ b/gen3/bin/kube-setup-hatchery.sh @@ -5,6 +5,44 @@ source "${GEN3_HOME}/gen3/lib/utils.sh" gen3_load "gen3/gen3setup" +function CostUsagePolicy() { + roleName="$(gen3 api safe-name hatchery-sa)" + # Cost Usage Report policy + curPolicy="costUsageReportPolicy" + + # Use the AWS CLI to list all policies attached to the role and then grep to search for the policy name + policyArn=$(aws iam list-role-policies --role-name "$roleName" | grep "$curPolicy") + + # Check if the policy ARN variable is empty or not + if [ -n "$policyArn" ]; then + echo "Policy $curPolicy is attached to the role $roleName." + else + echo "Policy $curPolicy is NOT attached to the role $roleName." + echo "Attaching policy" + # Define the policy document + policyDocument='{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "VisualEditor0", + "Effect": "Allow", + "Action": "ce:GetCostAndUsage", + "Resource": "*" + } + ] + }' + + # Create an inline policy for the role + aws iam put-role-policy --role-name "$roleName" --policy-name "$curPolicy" --policy-document "$policyDocument" + if [ $? -eq 0 ]; then + echo "Inline policy $curPolicy has been successfully created and attached to the role $roleName." + else + echo "There was an error creating the inline policy $curPolicy." + fi + + fi +} + # Jenkins friendly export WORKSPACE="${WORKSPACE:-$HOME}" @@ -209,6 +247,9 @@ if ! 
g3kubectl get sa "$saName" -o json | jq -e '.metadata.annotations | ."eks.a gen3 awsrole attach-policy "arn:aws:iam::aws:policy/AWSResourceAccessManagerFullAccess" --role-name ${roleName} --force-aws-cli || exit 1 fi +# function to setup IAM policies for CostUsageReport +CostUsagePolicy + if [[ -f "$(gen3_secrets_folder)/prisma/apikey.json" ]]; then ACCESSKEYID=$(jq -r .AccessKeyID "$(gen3_secrets_folder)/prisma/apikey.json") SECRETKEY=$(jq -r .SecretKey "$(gen3_secrets_folder)/prisma/apikey.json") From 8c3b1ce2659a8b93d8881f013baca1f21033cedb Mon Sep 17 00:00:00 2001 From: "J. Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 20 May 2024 13:25:25 -0500 Subject: [PATCH 356/362] Nextflow AMI pipeline (#2384) --- tf_files/aws/nextflow_ami_pipeline/data.tf | 24 +++ tf_files/aws/nextflow_ami_pipeline/iam.tf | 36 ++++ .../aws/nextflow_ami_pipeline/imagebuilder.tf | 161 ++++++++++++++++++ .../aws/nextflow_ami_pipeline/manifest.json | 6 + tf_files/aws/nextflow_ami_pipeline/root.tf | 7 + .../aws/nextflow_ami_pipeline/sample.tfvars | 1 + .../aws/nextflow_ami_pipeline/variables.tf | 28 +++ 7 files changed, 263 insertions(+) create mode 100644 tf_files/aws/nextflow_ami_pipeline/data.tf create mode 100644 tf_files/aws/nextflow_ami_pipeline/iam.tf create mode 100644 tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf create mode 100644 tf_files/aws/nextflow_ami_pipeline/manifest.json create mode 100644 tf_files/aws/nextflow_ami_pipeline/root.tf create mode 100644 tf_files/aws/nextflow_ami_pipeline/sample.tfvars create mode 100644 tf_files/aws/nextflow_ami_pipeline/variables.tf diff --git a/tf_files/aws/nextflow_ami_pipeline/data.tf b/tf_files/aws/nextflow_ami_pipeline/data.tf new file mode 100644 index 000000000..a8b950b2a --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/data.tf @@ -0,0 +1,24 @@ +data "aws_vpc" "selected" { + filter { + name = "tag:Name" + values = [var.vpc_name] + } +} + +data "aws_security_group" "default" { + vpc_id = data.aws_vpc.selected.id + 
+ filter { + name = "group-name" + values = ["default"] + } +} + +data "aws_subnet" "private" { + vpc_id = data.aws_vpc.selected.id + + filter { + name = "tag:Name" + values = [var.subnet_name] + } +} diff --git a/tf_files/aws/nextflow_ami_pipeline/iam.tf b/tf_files/aws/nextflow_ami_pipeline/iam.tf new file mode 100644 index 000000000..0b3594dd4 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/iam.tf @@ -0,0 +1,36 @@ +## IAM Instance Profile for image builder + +resource "aws_iam_role" "image_builder" { + name = "EC2InstanceProfileForImageBuilder-nextflow" + assume_role_policy = data.aws_iam_policy_document.assume_role.json +} + +data "aws_iam_policy_document" "assume_role" { + statement { + actions = ["sts:AssumeRole"] + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +resource "aws_iam_role_policy_attachment" "amazon_ssm" { + role = aws_iam_role.image_builder.name + policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" +} + +resource "aws_iam_role_policy_attachment" "image_builder" { + role = aws_iam_role.image_builder.name + policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilder" +} + +resource "aws_iam_role_policy_attachment" "image_builder_ecr" { + role = aws_iam_role.image_builder.name + policy_arn = "arn:aws:iam::aws:policy/EC2InstanceProfileForImageBuilderECRContainerBuilds" +} + +resource "aws_iam_instance_profile" "image_builder" { + name = "image-builder-profile" + role = aws_iam_role.image_builder.name +} diff --git a/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf b/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf new file mode 100644 index 000000000..0c3415003 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/imagebuilder.tf @@ -0,0 +1,161 @@ +## Image builder component to install AWS cli using conda + +resource "aws_imagebuilder_component" "install_software" { + name = "InstallSoftware" + platform = "Linux" + version = "1.0.1" + + data = yamlencode({ + name = 
"InstallSoftware" + description = "Installs bzip2, wget, Miniconda3 and awscli" + schemaVersion = 1.0 + + phases = [{ + name = "build" + steps = [{ + name = "InstallPackages" + action = "ExecuteBash" + inputs = { + commands = [ + "sudo yum install -y bzip2 wget" + ] + } + }, + { + name = "InstallMiniconda" + action = "ExecuteBash" + inputs = { + commands = [ + "sudo su ec2-user", + "mkdir -p /home/ec2-user", + "export HOME=/home/ec2-user/", + "cd $HOME", + "# Download and install miniconda in ec2-user's home dir", + "wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda-install.sh", + "bash miniconda-install.sh -b -f -p /home/ec2-user/miniconda", + "rm miniconda-install.sh" + ] + } + }, + { + name = "InstallAWSCLI" + action = "ExecuteBash" + inputs = { + commands = [ + "export HOME=/home/ec2-user/", + "/home/ec2-user/miniconda/bin/conda install -c conda-forge -y awscli" + ] + } + }] + }, + { + name = "validate" + steps = [{ + name = "CheckInstalls" + action = "ExecuteBash" + inputs = { + commands = [ + "which bzip2", + "which wget", + "which conda", + "/home/ec2-user/miniconda/bin/conda list | grep awscli" + ] + } + }] + }, + { + name = "test" + steps = [{ + name = "TestAWSCLI" + action = "ExecuteBash" + inputs = { + commands = [ + "/home/ec2-user/miniconda/bin/aws --version" + ] + } + }] + }] + }) +} + + +## Image builder infrastructure config +resource "aws_imagebuilder_infrastructure_configuration" "image_builder" { + name = "nextflow-infra-config" + instance_profile_name = aws_iam_instance_profile.image_builder.name + security_group_ids = [data.aws_security_group.default.id] + subnet_id = data.aws_subnet.private.id + terminate_instance_on_failure = true +} + + +## Make sure the ami produced is public + +resource "aws_imagebuilder_distribution_configuration" "public_ami" { + name = "public-ami-distribution" + + distribution { + ami_distribution_configuration { + name = "gen3-nextflow-{{ imagebuilder:buildDate }}" + + ami_tags = 
{ + Role = "Public Image" + } + + launch_permission { + user_groups = ["all"] + } + } + + region = "us-east-1" + } +} + + +## Image recipe +resource "aws_imagebuilder_image_recipe" "recipe" { + name = "nextflow-fips-recipe" + + parent_image = var.base_image + + version = "1.0.0" + + block_device_mapping { + device_name = "/dev/xvda" + ebs { + delete_on_termination = true + volume_size = 30 + volume_type = "gp2" + encrypted = false + } + } + + user_data_base64 = try(base64encode(var.user_data), null) + + component { + component_arn = "arn:aws:imagebuilder:us-east-1:aws:component/docker-ce-linux/1.0.0/1" + } + + component { + component_arn = aws_imagebuilder_component.install_software.arn + } + + + +} + + +# Image builder pipeline + +resource "aws_imagebuilder_image_pipeline" "nextflow" { + image_recipe_arn = aws_imagebuilder_image_recipe.recipe.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.image_builder.arn + name = "nextflow-fips" + + distribution_configuration_arn = aws_imagebuilder_distribution_configuration.public_ami.arn + + image_scanning_configuration { + image_scanning_enabled = true + } + +} diff --git a/tf_files/aws/nextflow_ami_pipeline/manifest.json b/tf_files/aws/nextflow_ami_pipeline/manifest.json new file mode 100644 index 000000000..62394dc4a --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/manifest.json @@ -0,0 +1,6 @@ +{ + "terraform": { + "module_version" : "0.12" + } + } + \ No newline at end of file diff --git a/tf_files/aws/nextflow_ami_pipeline/root.tf b/tf_files/aws/nextflow_ami_pipeline/root.tf new file mode 100644 index 000000000..8ccad5e14 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/root.tf @@ -0,0 +1,7 @@ +# Inject credentials via the AWS_PROFILE environment variable and shared credentials file +# and/or EC2 metadata service +terraform { + backend "s3" { + encrypt = "true" + } +} \ No newline at end of file diff --git a/tf_files/aws/nextflow_ami_pipeline/sample.tfvars 
b/tf_files/aws/nextflow_ami_pipeline/sample.tfvars new file mode 100644 index 000000000..e6423d359 --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/sample.tfvars @@ -0,0 +1 @@ +vpc_name = "devplanetv2" \ No newline at end of file diff --git a/tf_files/aws/nextflow_ami_pipeline/variables.tf b/tf_files/aws/nextflow_ami_pipeline/variables.tf new file mode 100644 index 000000000..58af6430f --- /dev/null +++ b/tf_files/aws/nextflow_ami_pipeline/variables.tf @@ -0,0 +1,28 @@ +variable "vpc_name" { + type = string +} + + +variable "subnet_name" { + type = string + default = "eks_private_0" +} + +variable "base_image" { + type = string + default = "arn:aws:imagebuilder:us-east-1:aws:image/amazon-linux-2-ecs-optimized-kernel-5-x86/x.x.x" +} + +variable "user_data" { + type = string + default = <> /opt/fips-install.log +sudo dracut -f +# configure grub +sudo /sbin/grubby --update-kernel=ALL --args="fips=1" +EOT +} \ No newline at end of file From 0cb7eaf9c68cbedd301fd85f4179b0ed10eeb1d9 Mon Sep 17 00:00:00 2001 From: Alexander VanTol Date: Mon, 20 May 2024 13:30:47 -0500 Subject: [PATCH 357/362] fix(access-backend): better default user.yaml (#2305) --- gen3/bin/kube-setup-access-backend.sh | 56 +++++++++++++++++++-------- 1 file changed, 40 insertions(+), 16 deletions(-) diff --git a/gen3/bin/kube-setup-access-backend.sh b/gen3/bin/kube-setup-access-backend.sh index bbb3ae663..60d4758c5 100644 --- a/gen3/bin/kube-setup-access-backend.sh +++ b/gen3/bin/kube-setup-access-backend.sh @@ -210,8 +210,10 @@ authz: - /programs/tutorial - /programs/open_access role_ids: - - reader - - storage_reader + - guppy_reader + - fence_reader + - peregrine_reader + - sheepdog_reader - description: full access to indexd API id: indexd_admin resource_paths: @@ -226,18 +228,22 @@ authz: - /programs/open_access role_ids: - creator - - reader + - guppy_reader + - fence_reader + - peregrine_reader + - sheepdog_reader - updater - deleter - storage_writer - - storage_reader - description: '' id: 
all_programs_reader resource_paths: - /programs role_ids: - - reader - - storage_reader + - guppy_reader + - fence_reader + - peregrine_reader + - sheepdog_reader - id: 'all_programs_writer' description: '' role_ids: @@ -328,12 +334,37 @@ authz: service: '*' id: creator - description: '' - id: reader + id: guppy_reader permissions: - action: method: read - service: '*' - id: reader + service: 'guppy' + id: guppy_reader + - description: '' + id: fence_reader + permissions: + - action: + method: read + service: 'fence' + id: fence_reader + - action: + method: read-storage + service: 'fence' + id: fence_storage_reader + - description: '' + id: peregrine_reader + permissions: + - action: + method: read + service: 'peregrine' + id: peregrine_reader + - description: '' + id: sheepdog_reader + permissions: + - action: + method: read + service: 'sheepdog' + id: sheepdog_reader - description: '' id: updater permissions: @@ -355,13 +386,6 @@ authz: method: write-storage service: '*' id: storage_creator - - description: '' - id: storage_reader - permissions: - - action: - method: read-storage - service: '*' - id: storage_reader - id: mds_user permissions: - action: From db5f876c9ead979bdcaeb327a329f0b688a8b95c Mon Sep 17 00:00:00 2001 From: "J. 
Q" <55899496+jawadqur@users.noreply.github.com> Date: Mon, 20 May 2024 13:32:40 -0500 Subject: [PATCH 358/362] Update revproxy-deploy.yaml (#2210) * Update revproxy-deploy.yaml Reverting reduced pod size for revproxy, as it gets overwhelmed during data submission * Update revproxy-deploy.yaml --- kube/services/revproxy/revproxy-deploy.yaml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kube/services/revproxy/revproxy-deploy.yaml b/kube/services/revproxy/revproxy-deploy.yaml index 9f10ce90b..7ea798b77 100644 --- a/kube/services/revproxy/revproxy-deploy.yaml +++ b/kube/services/revproxy/revproxy-deploy.yaml @@ -196,11 +196,12 @@ spec: mountPath: "/usr/local/share/ca-certificates/cdis/cdis-ca.crt" subPath: "ca.pem" resources: - requests: - cpu: 100m - memory: 100Mi + requests: + cpu: 0.5 + memory: 1024Mi limits: - memory: 800Mi + cpu: 1.0 + memory: 2048Mi command: ["/bin/sh" ] args: - "-c" From 136de96dbb3c077ae3cc9f08fca796cef67a645d Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 20 May 2024 12:38:57 -0600 Subject: [PATCH 359/362] fix(tests): Fixed jenkins tests (#2178) Co-authored-by: Edward Malinowski From 05c5587af449130b8936aa80374804ea214a0d9d Mon Sep 17 00:00:00 2001 From: Aidan Hilt <11202897+AidanHilt@users.noreply.github.com> Date: Mon, 20 May 2024 14:46:51 -0400 Subject: [PATCH 360/362] Update web_whitelist (#2112) * Update web_whitelist Added bioconductor.org to the whitelist so Kyle Hernandez can install some R packages. 
* Update web_whitelist --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 6896314ab..58799d6bb 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -22,6 +22,7 @@ archive.cloudera.com archive.linux.duke.edu aws.github.io bay.uchicago.edu +bioconductor.org bionimbus.tabix.oicrsofteng.org bits.netbeans.org centos.chicago.waneq.com From 562efa3d31fc425e345d4a04e109f0707a679b2c Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 20 May 2024 12:53:18 -0600 Subject: [PATCH 361/362] chore(helm-whitelist): Updated squid whitelist to include new cdis repo (#2043) Co-authored-by: Edward Malinowski Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- files/squid_whitelist/web_whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/files/squid_whitelist/web_whitelist b/files/squid_whitelist/web_whitelist index 58799d6bb..4d4c8f393 100644 --- a/files/squid_whitelist/web_whitelist +++ b/files/squid_whitelist/web_whitelist @@ -157,6 +157,7 @@ sa-update.space-pro.be security.debian.org services.mathworks.com streaming.stat.iastate.edu +uc-cdis.github.io us-east4-docker.pkg.dev us-central1-docker.pkg.dev www.google.com From aae26ac3d22a2d6564b92c9f7554dcd5d6402cb4 Mon Sep 17 00:00:00 2001 From: emalinowski Date: Mon, 20 May 2024 12:57:47 -0600 Subject: [PATCH 362/362] fix(secondary-subnet): Added route table associates for all of the new subnets (#1999) Co-authored-by: Edward Malinowski Co-authored-by: jawadqur <55899496+jawadqur@users.noreply.github.com> --- tf_files/aws/modules/eks/cloud.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tf_files/aws/modules/eks/cloud.tf b/tf_files/aws/modules/eks/cloud.tf index f8b237eeb..693462b1c 100644 --- a/tf_files/aws/modules/eks/cloud.tf +++ b/tf_files/aws/modules/eks/cloud.tf @@ -254,7 +254,7 @@ resource "aws_route_table_association" 
"private_kube" { } resource "aws_route_table_association" "secondary_subnet_kube" { - count = "${var.secondary_cidr_block != "" ? 1 : 0}" + count = "${var.secondary_cidr_block != "" ? 4 : 0}" subnet_id = "${aws_subnet.eks_secondary_subnet.*.id[count.index]}" route_table_id = "${aws_route_table.eks_private.id}" depends_on = ["aws_subnet.eks_secondary_subnet"]