From f6fb0f5a63055e20c6d9da97fa08e5c8cb1cef21 Mon Sep 17 00:00:00 2001 From: Vlad Klokun Date: Mon, 30 Oct 2023 04:13:13 +0200 Subject: [PATCH 1/5] feat: support continuous scanning This commit adds support for Continuous Scanning. It exposes new values that control: - whether to enable Continuous Scanning - which resources to monitor. Signed-off-by: Vlad Klokun --- .../configs/matchingRules-configmap.yaml | 11 ++ .../templates/operator/deployment.yaml | 10 ++ .../__snapshot__/snapshot_test.yaml.snap | 158 ++++++++++-------- charts/kubescape-operator/values.yaml | 23 ++- 4 files changed, 129 insertions(+), 73 deletions(-) create mode 100644 charts/kubescape-operator/templates/configs/matchingRules-configmap.yaml diff --git a/charts/kubescape-operator/templates/configs/matchingRules-configmap.yaml b/charts/kubescape-operator/templates/configs/matchingRules-configmap.yaml new file mode 100644 index 00000000..d837314c --- /dev/null +++ b/charts/kubescape-operator/templates/configs/matchingRules-configmap.yaml @@ -0,0 +1,11 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: {{ .Values.continuousScanning.configMapName }} + namespace: {{ .Values.ksNamespace }} + labels: + app: {{ .Values.ksLabel }} + tier: {{ .Values.global.namespaceTier }} +data: + matchingRules.json: | + {{ mustToJson .Values.continuousScanning.matchingRules }} diff --git a/charts/kubescape-operator/templates/operator/deployment.yaml b/charts/kubescape-operator/templates/operator/deployment.yaml index 9cd81f5b..2b80bae3 100644 --- a/charts/kubescape-operator/templates/operator/deployment.yaml +++ b/charts/kubescape-operator/templates/operator/deployment.yaml @@ -120,6 +120,10 @@ spec: mountPath: /etc/config/capabilities.json readOnly: true subPath: "capabilities.json" + - name: {{ .Values.continuousScanning.configMapName }} + mountPath: /etc/config/matchingRules.json + readOnly: true + subPath: "matchingRules.json" - name: config mountPath: /etc/config/config.json readOnly: true @@ -168,6 +172,12 
@@ spec: items: - key: "config.json" path: "config.json" + - name: {{ .Values.continuousScanning.configMapName }} + configMap: + name: {{ .Values.continuousScanning.configMapName }} + items: + - key: "matchingRules.json" + path: "matchingRules.json" {{- if .Values.volumes }} {{ toYaml .Values.volumes | indent 8 }} {{- end }} diff --git a/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap b/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap index 606a19be..12313608 100644 --- a/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap +++ b/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap @@ -173,6 +173,18 @@ matches the snapshot: name: ks-capabilities namespace: kubescape 9: | + apiVersion: v1 + data: + matchingRules.json: | + {"match":[{"apiGroups":["apps"],"apiVersions":["v1"],"resources":["deployments"]}],"namespaces":["kube-system","default"]} + kind: ConfigMap + metadata: + labels: + app: kubescape + tier: ks-control-plane + name: cs-matching-rules + namespace: kubescape + 10: | apiVersion: apps/v1 kind: Deployment metadata: @@ -295,7 +307,7 @@ matches the snapshot: path: services.json name: ks-cloud-config name: ks-cloud-config - 10: | + 11: | apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -328,7 +340,7 @@ matches the snapshot: policyTypes: - Ingress - Egress - 11: | + 12: | apiVersion: v1 kind: Service metadata: @@ -349,7 +361,7 @@ matches the snapshot: selector: app: gateway type: ClusterIP - 12: | + 13: | apiVersion: apps/v1 kind: Deployment metadata: @@ -400,7 +412,7 @@ matches the snapshot: securityContext: allowPrivilegeEscalation: false runAsNonRoot: true - 13: | + 14: | apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -427,7 +439,7 @@ matches the snapshot: tier: ks-control-plane policyTypes: - Ingress - 14: | + 15: | apiVersion: v1 kind: Service metadata: @@ -443,7 +455,7 @@ matches the snapshot: selector: app: grype-offline-db type: ClusterIP - 15: 
| + 16: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -482,7 +494,7 @@ matches the snapshot: - get - watch - list - 16: | + 17: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -495,7 +507,7 @@ matches the snapshot: - kind: ServiceAccount name: kollector namespace: kubescape - 17: | + 18: | apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -522,7 +534,7 @@ matches the snapshot: tier: ks-control-plane policyTypes: - Egress - 18: | + 19: | apiVersion: v1 automountServiceAccountToken: false kind: ServiceAccount @@ -531,7 +543,7 @@ matches the snapshot: app: kollector name: kollector namespace: kubescape - 19: | + 20: | apiVersion: apps/v1 kind: StatefulSet metadata: @@ -649,7 +661,7 @@ matches the snapshot: path: services.json name: ks-cloud-config name: ks-cloud-config - 20: | + 21: | apiVersion: v1 data: request-body.json: '{"commands":[{"CommandName":"kubescapeScan","args":{"scanV1": {}}}]}' @@ -660,7 +672,7 @@ matches the snapshot: tier: ks-control-plane name: kubescape-scheduler namespace: kubescape - 21: | + 22: | apiVersion: batch/v1 kind: CronJob metadata: @@ -716,7 +728,7 @@ matches the snapshot: name: kubescape-scheduler name: kubescape-scheduler schedule: 1 2 3 4 5 - 22: | + 23: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -892,7 +904,7 @@ matches the snapshot: - create - update - patch - 23: | + 24: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -905,7 +917,7 @@ matches the snapshot: - kind: ServiceAccount name: kubescape namespace: kubescape - 24: | + 25: | apiVersion: apps/v1 kind: Deployment metadata: @@ -1059,7 +1071,7 @@ matches the snapshot: name: results - emptyDir: {} name: failed - 25: | + 26: | apiVersion: v1 data: host-scanner-yaml: |- @@ -1154,7 +1166,7 @@ matches the snapshot: tier: ks-control-plane name: host-scanner-definition namespace: kubescape - 26: | + 27: | apiVersion: networking.k8s.io/v1 kind: 
NetworkPolicy metadata: @@ -1187,7 +1199,7 @@ matches the snapshot: policyTypes: - Ingress - Egress - 27: | + 28: | apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: @@ -1206,7 +1218,7 @@ matches the snapshot: - list - patch - delete - 28: | + 29: | apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -1220,7 +1232,7 @@ matches the snapshot: - kind: ServiceAccount name: kubescape namespace: kubescape - 29: | + 30: | apiVersion: v1 kind: Service metadata: @@ -1237,7 +1249,7 @@ matches the snapshot: selector: app: kubescape type: ClusterIP - 30: | + 31: | apiVersion: v1 automountServiceAccountToken: false kind: ServiceAccount @@ -1246,7 +1258,7 @@ matches the snapshot: app: kubescape name: kubescape namespace: kubescape - 31: | + 32: | apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -1267,7 +1279,7 @@ matches the snapshot: selector: matchLabels: app: kubescape - 32: | + 33: | apiVersion: v1 data: request-body.json: '{"commands":[{"commandName":"scan","designators":[{"designatorType":"Attributes","attributes":{}}]}]}' @@ -1278,7 +1290,7 @@ matches the snapshot: tier: ks-control-plane name: kubevuln-scheduler namespace: kubescape - 33: | + 34: | apiVersion: batch/v1 kind: CronJob metadata: @@ -1334,7 +1346,7 @@ matches the snapshot: name: kubevuln-scheduler name: kubevuln-scheduler schedule: 1 2 3 4 5 - 34: | + 35: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -1363,7 +1375,7 @@ matches the snapshot: - get - watch - list - 35: | + 36: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -1376,7 +1388,7 @@ matches the snapshot: - kind: ServiceAccount name: kubevuln namespace: kubescape - 36: | + 37: | apiVersion: apps/v1 kind: Deployment metadata: @@ -1502,7 +1514,7 @@ matches the snapshot: name: ks-cloud-config - emptyDir: {} name: grype-db - 37: | + 38: | apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -1534,7 +1546,7 @@ matches the snapshot: 
policyTypes: - Ingress - Egress - 38: | + 39: | apiVersion: v1 kind: Service metadata: @@ -1550,7 +1562,7 @@ matches the snapshot: selector: app: kubevuln type: ClusterIP - 39: | + 40: | apiVersion: v1 automountServiceAccountToken: false kind: ServiceAccount @@ -1559,7 +1571,7 @@ matches the snapshot: app: kubevuln name: kubevuln namespace: kubescape - 40: | + 41: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -1625,7 +1637,7 @@ matches the snapshot: - watch - list - patch - 41: | + 42: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -1638,7 +1650,7 @@ matches the snapshot: - kind: ServiceAccount name: node-agent namespace: kubescape - 42: | + 43: | apiVersion: v1 data: config.json: | @@ -1653,7 +1665,7 @@ matches the snapshot: metadata: name: node-agent namespace: kubescape - 43: | + 44: | apiVersion: apps/v1 kind: DaemonSet metadata: @@ -1819,13 +1831,13 @@ matches the snapshot: - name: proxy-secret secret: secretName: kubescape-proxy-certificate - 44: | + 45: | apiVersion: v1 kind: ServiceAccount metadata: name: node-agent namespace: kubescape - 45: | + 46: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -1879,7 +1891,7 @@ matches the snapshot: - watch - list - delete - 46: | + 47: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -1892,7 +1904,7 @@ matches the snapshot: - kind: ServiceAccount name: operator namespace: kubescape - 47: | + 48: | apiVersion: v1 data: config.json: | @@ -1904,7 +1916,7 @@ matches the snapshot: metadata: name: operator namespace: kubescape - 48: | + 49: | apiVersion: apps/v1 kind: Deployment metadata: @@ -1954,7 +1966,7 @@ matches the snapshot: value: zap - name: OTEL_COLLECTOR_SVC value: otel-collector:4317 - image: quay.io/kubescape/operator:v0.1.58 + image: vklokun/ks-operator-test:continuous-scanning imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -2005,6 +2017,10 @@ matches the snapshot: name: 
ks-capabilities readOnly: true subPath: capabilities.json + - mountPath: /etc/config/matchingRules.json + name: cs-matching-rules + readOnly: true + subPath: matchingRules.json - mountPath: /etc/config/config.json name: config readOnly: true @@ -2045,7 +2061,13 @@ matches the snapshot: path: config.json name: operator name: config - 49: | + - configMap: + items: + - key: matchingRules.json + path: matchingRules.json + name: cs-matching-rules + name: cs-matching-rules + 50: | apiVersion: v1 data: cronjobTemplate: "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n name: kubescape-scheduler\n namespace: kubescape\n labels:\n app: kubescape-scheduler\n tier: ks-control-plane\n armo.tier: \"kubescape-scan\"\nspec:\n schedule: \"1 2 3 4 5\"\n jobTemplate:\n spec:\n template:\n metadata:\n labels:\n armo.tier: \"kubescape-scan\"\n spec:\n containers:\n - name: kubescape-scheduler\n image: \"quay.io/kubescape/http-request:v0.0.14\"\n imagePullPolicy: IfNotPresent\n securityContext:\n allowPrivilegeEscalation: false\n readOnlyRootFilesystem: true\n runAsNonRoot: true\n runAsUser: 100\n resources:\n limits:\n cpu: 10m\n memory: 20Mi\n requests:\n cpu: 1m\n memory: 10Mi\n args: \n - -method=post\n - -scheme=http\n - -host=operator:4002\n - -path=v1/triggerAction\n - -headers=\"Content-Type:application/json\"\n - -path-body=/home/ks/request-body.json\n volumeMounts:\n - name: \"request-body-volume\"\n mountPath: /home/ks/request-body.json\n subPath: request-body.json\n readOnly: true\n restartPolicy: Never\n automountServiceAccountToken: false\n volumes:\n - name: \"request-body-volume\" # placeholder\n configMap:\n name: kubescape-scheduler" @@ -2056,7 +2078,7 @@ matches the snapshot: tier: ks-control-plane name: kubescape-cronjob-template namespace: kubescape - 50: | + 51: | apiVersion: v1 data: cronjobTemplate: "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n name: kubevuln-scheduler\n namespace: kubescape\n labels:\n app: kubevuln-scheduler\n tier: ks-control-plane\n 
armo.tier: \"vuln-scan\"\nspec:\n schedule: \"1 2 3 4 5\" \n jobTemplate:\n spec:\n template:\n metadata:\n labels:\n armo.tier: \"vuln-scan\"\n spec:\n containers:\n - name: kubevuln-scheduler\n image: \"quay.io/kubescape/http-request:v0.0.14\"\n imagePullPolicy: IfNotPresent\n securityContext:\n allowPrivilegeEscalation: false\n readOnlyRootFilesystem: true\n runAsNonRoot: true\n runAsUser: 100\n resources:\n limits:\n cpu: 10m\n memory: 20Mi\n requests:\n cpu: 1m\n memory: 10Mi\n args: \n - -method=post\n - -scheme=http\n - -host=operator:4002\n - -path=v1/triggerAction\n - -headers=\"Content-Type:application/json\"\n - -path-body=/home/ks/request-body.json\n volumeMounts:\n - name: \"request-body-volume\"\n mountPath: /home/ks/request-body.json\n subPath: request-body.json\n readOnly: true\n restartPolicy: Never\n automountServiceAccountToken: false\n volumes:\n - name: \"request-body-volume\" # placeholder\n configMap:\n name: kubevuln-scheduler" @@ -2067,7 +2089,7 @@ matches the snapshot: tier: ks-control-plane name: kubevuln-cronjob-template namespace: kubescape - 51: | + 52: | apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -2106,7 +2128,7 @@ matches the snapshot: policyTypes: - Ingress - Egress - 52: | + 53: | apiVersion: v1 data: cronjobTemplate: "apiVersion: batch/v1\nkind: CronJob\nmetadata:\n name: registry-scheduler\n namespace: kubescape\n labels:\n app: registry-scheduler\n tier: ks-control-plane\n armo.tier: \"registry-scan\"\nspec:\n schedule: \"0 0 * * *\"\n jobTemplate:\n spec:\n template:\n metadata:\n labels:\n armo.tier: \"registry-scan\"\n spec:\n containers:\n - name: registry-scheduler\n image: \"quay.io/kubescape/http-request:v0.0.14\"\n imagePullPolicy: IfNotPresent\n securityContext:\n allowPrivilegeEscalation: false\n readOnlyRootFilesystem: true\n runAsNonRoot: true\n runAsUser: 100\n resources:\n limits:\n cpu: 10m\n memory: 20Mi\n requests:\n cpu: 1m\n memory: 10Mi\n args: \n - -method=post\n - -scheme=http\n - 
-host=operator:4002\n - -path=v1/triggerAction\n - -headers=\"Content-Type:application/json\"\n - -path-body=/home/ks/request-body.json\n volumeMounts:\n - name: \"request-body-volume\"\n mountPath: /home/ks/request-body.json\n subPath: request-body.json\n readOnly: true\n restartPolicy: Never\n automountServiceAccountToken: false\n volumes:\n - name: \"request-body-volume\" # placeholder\n configMap:\n name: registry-scheduler" @@ -2117,7 +2139,7 @@ matches the snapshot: tier: ks-control-plane name: registry-scan-cronjob-template namespace: kubescape - 53: | + 54: | apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: @@ -2149,7 +2171,7 @@ matches the snapshot: - list - patch - delete - 54: | + 55: | apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -2163,7 +2185,7 @@ matches the snapshot: - kind: ServiceAccount name: operator namespace: kubescape - 55: | + 56: | apiVersion: v1 kind: Service metadata: @@ -2179,7 +2201,7 @@ matches the snapshot: selector: app: operator type: ClusterIP - 56: | + 57: | apiVersion: v1 automountServiceAccountToken: false kind: ServiceAccount @@ -2188,7 +2210,7 @@ matches the snapshot: app: operator name: operator namespace: kubescape - 57: | + 58: | apiVersion: v1 data: otel-collector-config.yaml: "\n# receivers configure how data gets into the Collector.\nreceivers:\n otlp:\n protocols:\n grpc:\n http:\n hostmetrics:\n collection_interval: 30s\n scrapers:\n cpu:\n memory:\n\n# processors specify what happens with the received data.\nprocessors:\n attributes/ksCloud:\n actions:\n - key: account_id\n value: \"9e6c0c2c-6bd0-4919-815b-55030de7c9a0\"\n action: upsert\n - key: cluster_name\n value: \"kind-kind\"\n action: upsert\n batch:\n send_batch_size: 10000\n timeout: 10s\n\n# exporters configure how to send processed data to one or more backends.\nexporters:\n otlp/ksCloud:\n endpoint: ${env:CLOUD_OTEL_COLLECTOR_URL}\n tls:\n insecure: false\n otlp:\n endpoint: \"otelCollector:4317\"\n tls:\n insecure: 
true\n headers:\n uptrace-dsn: \n\n# service pulls the configured receivers, processors, and exporters together into\n# processing pipelines. Unused receivers/processors/exporters are ignored.\nservice:\n pipelines:\n traces:\n receivers: [otlp]\n processors: [batch]\n exporters:\n - otlp/ksCloud\n - otlp\n metrics/2:\n receivers: [hostmetrics]\n processors: [attributes/ksCloud, batch]\n exporters:\n - otlp/ksCloud\n - otlp\n metrics:\n receivers: [otlp]\n processors: [batch]\n exporters:\n - otlp/ksCloud\n - otlp\n logs:\n receivers: [otlp]\n processors: [batch]\n exporters:\n - otlp/ksCloud\n - otlp" @@ -2199,7 +2221,7 @@ matches the snapshot: tier: ks-control-plane name: otel-collector-config namespace: kubescape - 58: | + 59: | apiVersion: apps/v1 kind: Deployment metadata: @@ -2280,7 +2302,7 @@ matches the snapshot: - configMap: name: otel-collector-config name: otel-collector-config-volume - 59: | + 60: | apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: @@ -2313,7 +2335,7 @@ matches the snapshot: policyTypes: - Ingress - Egress - 60: | + 61: | apiVersion: v1 kind: Service metadata: @@ -2330,7 +2352,7 @@ matches the snapshot: selector: app: otel-collector type: ClusterIP - 61: | + 62: | apiVersion: v1 data: proxy.crt: foo @@ -2339,7 +2361,7 @@ matches the snapshot: name: kubescape-proxy-certificate namespace: kubescape type: Opaque - 62: | + 63: | apiVersion: batch/v1 kind: Job metadata: @@ -2414,7 +2436,7 @@ matches the snapshot: volumes: - emptyDir: {} name: shared-data - 63: | + 64: | apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: @@ -2435,7 +2457,7 @@ matches the snapshot: - patch - get - list - 64: | + 65: | apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -2452,7 +2474,7 @@ matches the snapshot: - kind: ServiceAccount name: service-discovery namespace: kubescape - 65: | + 66: | apiVersion: v1 kind: ServiceAccount metadata: @@ -2462,7 +2484,7 @@ matches the snapshot: helm.sh/hook-weight: "0" name: 
service-discovery namespace: kubescape - 66: | + 67: | apiVersion: apiregistration.k8s.io/v1 kind: APIService metadata: @@ -2476,7 +2498,7 @@ matches the snapshot: namespace: kubescape version: v1beta1 versionPriority: 15 - 67: | + 68: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -2508,7 +2530,7 @@ matches the snapshot: - get - watch - list - 68: | + 69: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -2521,7 +2543,7 @@ matches the snapshot: - kind: ServiceAccount name: storage namespace: kubescape - 69: | + 70: | apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -2534,7 +2556,7 @@ matches the snapshot: - kind: ServiceAccount name: storage namespace: kubescape - 70: | + 71: | apiVersion: apps/v1 kind: Deployment metadata: @@ -2573,8 +2595,8 @@ matches the snapshot: name: cloud-secret - name: OTEL_COLLECTOR_SVC value: otel-collector:4317 - image: quay.io/kubescape/storage:v0.0.32 - imagePullPolicy: IfNotPresent + image: vklokun/storage:v0.0.31-alreadyexists-revert + imagePullPolicy: Never name: apiserver resources: limits: @@ -2621,7 +2643,7 @@ matches the snapshot: path: services.json name: ks-cloud-config name: ks-cloud-config - 71: | + 72: | apiVersion: v1 kind: PersistentVolumeClaim metadata: @@ -2637,7 +2659,7 @@ matches the snapshot: resources: requests: storage: 5Gi - 72: | + 73: | apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: @@ -2651,7 +2673,7 @@ matches the snapshot: - kind: ServiceAccount name: storage namespace: kubescape - 73: | + 74: | apiVersion: v1 kind: Service metadata: @@ -2666,7 +2688,7 @@ matches the snapshot: app.kubernetes.io/component: apiserver app.kubernetes.io/name: storage app.kubernetes.io/part-of: kubescape-storage - 74: | + 75: | apiVersion: v1 kind: ServiceAccount metadata: diff --git a/charts/kubescape-operator/values.yaml b/charts/kubescape-operator/values.yaml index d612d1a5..9451497f 100644 --- 
a/charts/kubescape-operator/values.yaml +++ b/charts/kubescape-operator/values.yaml @@ -8,6 +8,7 @@ ksLabel: kubescape capabilities: relevancy: enable configurationScan: enable + # Continuous Scanning continuously evaluates the security posture of your cluster. continuousScan: disable vulnerabilityScan: enable nodeScan: enable @@ -93,6 +94,17 @@ global: enabled: false createEgressRules: false +continuousScanning: + configMapName: cs-matching-rules + matchingRules: + match: + - apiGroups: ["apps"] + apiVersions: ["v1"] + resources: ["deployments"] + namespaces: + - kube-system + - default + # kubescape scheduled scan using a CronJob kubescapeScheduler: @@ -192,6 +204,7 @@ kubescape: # Operator will trigger kubescape and kubevuln scanning operator: + replicaCount: 1 # operator Deployment name @@ -199,8 +212,8 @@ operator: image: # -- source code: https://github.com/kubescape/operator - repository: quay.io/kubescape/operator - tag: v0.1.58 + repository: vklokun/ks-operator-test + tag: continuous-scanning pullPolicy: IfNotPresent service: @@ -512,9 +525,9 @@ storage: replicaCount: 1 image: - repository: quay.io/kubescape/storage - tag: v0.0.32 - pullPolicy: IfNotPresent + repository: vklokun/storage + tag: v0.0.31-alreadyexists-revert + pullPolicy: Never grypeOfflineDB: enabled: false From ba95141c8760698f1658b9191e81d0a6e27d46a7 Mon Sep 17 00:00:00 2001 From: Vlad Klokun Date: Tue, 17 Oct 2023 20:15:09 +0300 Subject: [PATCH 2/5] docs: document the Continuous Scanning feature Signed-off-by: Vlad Klokun --- charts/kubescape-operator/README.md | 105 ++++++++++++++++++++++++++ charts/kubescape-operator/values.yaml | 8 +- 2 files changed, 111 insertions(+), 2 deletions(-) diff --git a/charts/kubescape-operator/README.md b/charts/kubescape-operator/README.md index 666472b4..615a347a 100644 --- a/charts/kubescape-operator/README.md +++ b/charts/kubescape-operator/README.md @@ -732,3 +732,108 @@ kubectl -n kubescape delete clusterrolebinding/helm-release-upgrader \ ``` Once 
it finishes, you should have no traces of the Release Upgrader in your cluster. + +# Continuous Scanning + +To let cluster operators see the current security posture of their cluster, Kubescape provides the Continuous Scanning feature. Once enabled, Kubescape will constantly monitor the cluster for changes, evaluate their impact on the overall cluster security and reflect its findings in the cluster-, namespace- and workload-scoped security reports. Essentially, this means you'll always get the latest scoop on your cluster’s security! + +## Installation + +Continuous Scanning is built into the Kubescape Operator Helm chart. To use this capability, you only need to enable it. Start by navigating to the `values.yaml` file and make sure that the corresponding `capabilities.continuousScan` key is set to `enable`, like so: + +```yaml +capabilities: + continuousScan: enable # Make sure this is set to "enable" +``` + +Once you apply the chart with the capability enabled, Kubescape will continuously secure your cluster and provide the scan results as Custom Resources. + +## Accessing Results + +### For the Whole Cluster + +Kubescape provides scan results as Custom Resources so you can access them in the same convenient way you access other Kubernetes objects. Let’s assume you’d like to see a bird’s-eye view of your cluster’s security. In Kubescape terms, that would mean taking a look at the cluster-level configuration scan summary: + +``` +kubectl get workloadconfigurationscansummaries -o yaml +``` + +Running this command will return you a YAML-formatted list of configuration scan summaries for your cluster by namespaces. + +On clusters with many namespaces, the results might be overwhelming and might even exceed your terminal history. Since Kubescape serves results as Kubernetes objects, which are YAML files at their core, you can do your usual tricks: pipe them to files, text editors etc. 
A trick we commonly use is: + +``` +kubectl get workloadconfigurationscansummaries -o yaml | less +``` + +This way you get the entire results and browse the file as you see fit. + +### By Namespace + +Let’s say you have a namespace `k8s-bad-practices`. It runs badly misconfigured insecure workloads and you would like to see how Kubescape sees them. To get the configuration scan results for this namespace, run the following command: + +``` +kubectl get -n k8s-bad-practices workloadconfigurationscansummaries -o yaml | less +``` + +You should see a summary for the insecure namespace only. + +### By Workload + +You could also be interested in checking how secure a specific workload is. To see the results, use: + +``` +kubectl get -n k8s-bad-practices workloadconfigurationscansummaries trusty-reverse-proxy -o yaml | less +``` + +That should provide you with a configuration scan summary for this workload. + +## How It Works + +With Continuous Scanning enabled, Kubescape continuously monitors a cluster for changes, determines how they affect the security of your cluster and reflects their security impact in the overall cluster’s security report, as well as individual workload security reports. Let’s take a look at a sequence diagram. 
+ + +```mermaid +sequenceDiagram + actor u as User + participant k8a as K8sAPI + participant kso as Operator + participant kss as Storage + participant ks as Kubescape + + u ->> k8a: Install Kubescape Helm Chart + par Install Operator + k8a ->> kso: Start + and Install Storage + k8a ->> kss: Start + and Install Kubescape + k8a ->> ks: Start + end + + kso ->> kso: Fetch list of Interesting Kinds + Note right of kso: Interesting things include resource Kinds and Namespaces + kso ->> k8a: Request watches for Interesting Kinds + k8a ->>+ kso: Return watches for Interesting Kinds + + alt User modifies a resource + u ->> k8a: modify Resource A + k8a ->> kso: Resource A created + kso ->> ks: Trigger scan for Resource A + ks ->> ks: Scan Resource A + ks ->> kss: Store results as Custom Resource + else User Deletes a resource + u ->> k8a: Delete Resource A + k8a ->>+ kso: Resource A deleted + alt Resource A is a Role + Note right of kso: Roles can be used later, so do nothing + else Resource A is a simple resource / RBAC Subject + kso ->> kso: Extract Custom Resource name from Resource A + kso ->>+ kss: Remove Custom Resource A + kss ->>- kso: Resource removed + else Resource A is a RoleBinding + kso ->> kso: Read the RoleBinding + kso ->> kso: Calculate Custom Resource names for subjects S_1, ..., S_n + kso ->> kss: Delete calculated Custom Resources for S_1, ..., S_n + end + end +``` diff --git a/charts/kubescape-operator/values.yaml b/charts/kubescape-operator/values.yaml index 9451497f..e6e767cb 100644 --- a/charts/kubescape-operator/values.yaml +++ b/charts/kubescape-operator/values.yaml @@ -14,7 +14,7 @@ capabilities: nodeScan: enable runtimeObservability: disable # This is an experimental capability with an elevated security risk. Read the - # matching README section before enabling. + # matching docs before enabling. 
autoUpgrading: disable # networkGenerator: disable # seccompGenerator: disable @@ -94,15 +94,19 @@ global: enabled: false createEgressRules: false +# Continuous scanning configurations continuousScanning: configMapName: cs-matching-rules + + # Matching rules for the monitored resources. + # Kubescape will watch resources of every provided GVR across the provided + # namespaces. matchingRules: match: - apiGroups: ["apps"] apiVersions: ["v1"] resources: ["deployments"] namespaces: - - kube-system - default # kubescape scheduled scan using a CronJob From 8d97ee65a47e140b68bbe3f06d0b4d99c3a40cc0 Mon Sep 17 00:00:00 2001 From: Vlad Klokun Date: Wed, 1 Nov 2023 10:34:29 +0200 Subject: [PATCH 3/5] chore: explain how to use continuous scanning in NOTES.txt Closes #290. Signed-off-by: Vlad Klokun --- charts/kubescape-operator/templates/NOTES.txt | 25 ++++++++++++++++--- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/charts/kubescape-operator/templates/NOTES.txt b/charts/kubescape-operator/templates/NOTES.txt index 1c59fca8..c38895e3 100644 --- a/charts/kubescape-operator/templates/NOTES.txt +++ b/charts/kubescape-operator/templates/NOTES.txt @@ -1,11 +1,28 @@ Thank you for installing {{ .Chart.Name }} version {{ .Chart.Version }}. -You can see and change the values of your's recurring configurations daily scan in the following link: -https://cloud.armosec.io/settings/assets/clusters/scheduled-scans?cluster={{ regexReplaceAll "\\W+" .Values.clusterName "-" }} +{{ $components := fromYaml (include "components" .) 
-}} +{{ if $components.kubescapeScheduler.enabled -}} +You can view your cluster's configuration scanning schedule using the following command: > kubectl -n {{ .Values.ksNamespace }} get cj {{ .Values.kubescapeScheduler.name }} -o=jsonpath='{.metadata.name}{"\t"}{.spec.schedule}{"\n"}' -You can see and change the values of your's recurring images daily scan in the following link: -https://cloud.armosec.io/settings/assets/images +To change the schedule, set the `.spec.schedule` key to the value you need with: +> kubectl -n {{ .Values.ksNamespace }} edit cj {{ .Values.kubescapeScheduler.name }} +{{- end }} + +{{ if $components.kubevulnScheduler.enabled -}} +The image scanning schedule is available with the following command: > kubectl -n {{ .Values.ksNamespace }} get cj {{ .Values.kubevulnScheduler.name }} -o=jsonpath='{.metadata.name}{"\t"}{.spec.schedule}{"\n"}' +To change the schedule, edit the `.spec.schedule` key with: +> kubectl -n {{ .Values.ksNamespace }} edit cj {{ .Values.kubevulnScheduler.name }} +{{- end }} + +{{ if eq .Values.capabilities.continuousScan "enable" -}} +Once Kubescape finishes initial scanning, you can view a list of Kubescape configuration scan results using the following command: +> kubectl get workloadconfigurationscansummaries -A + +When you see an interesting workload, take a closer look at its security posture with: +> kubectl get workloadconfigurationscansummary -n {EXAMPLE_RESOURCE_NAMESPACE} {EXAMPLE_RESOURCE_NAME} -o yaml +{{- end }} + See you!!! 
From 1b78d152e1590edf3be4e0dc447900d42e7173f2 Mon Sep 17 00:00:00 2001 From: Vlad Klokun Date: Wed, 1 Nov 2023 10:56:19 +0200 Subject: [PATCH 4/5] chore: update component images to mainline Signed-off-by: Vlad Klokun --- charts/kubescape-operator/values.yaml | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/charts/kubescape-operator/values.yaml b/charts/kubescape-operator/values.yaml index e6e767cb..45d013ef 100644 --- a/charts/kubescape-operator/values.yaml +++ b/charts/kubescape-operator/values.yaml @@ -208,7 +208,6 @@ kubescape: # Operator will trigger kubescape and kubevuln scanning operator: - replicaCount: 1 # operator Deployment name @@ -216,8 +215,8 @@ operator: image: # -- source code: https://github.com/kubescape/operator - repository: vklokun/ks-operator-test - tag: continuous-scanning + repository: quay.io/kubescape/operator + tag: v0.1.60 pullPolicy: IfNotPresent service: @@ -529,9 +528,9 @@ storage: replicaCount: 1 image: - repository: vklokun/storage - tag: v0.0.31-alreadyexists-revert - pullPolicy: Never + repository: quay.io/kubescape/storage + tag: v0.0.32 + pullPolicy: IfNotPresent grypeOfflineDB: enabled: false From 6e79b744a5f211b7f5dbc9b58c51d170d95b5967 Mon Sep 17 00:00:00 2001 From: Vlad Klokun Date: Wed, 1 Nov 2023 14:47:47 +0200 Subject: [PATCH 5/5] tests: update expected Helm snapshot Signed-off-by: Vlad Klokun --- .../__snapshot__/snapshot_test.yaml.snap | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap b/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap index 12313608..b3f4ffa2 100644 --- a/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap +++ b/charts/kubescape-operator/tests/__snapshot__/snapshot_test.yaml.snap @@ -3,14 +3,20 @@ matches the snapshot: raw: | Thank you for installing kubescape-operator version 1.16.1. 
- You can see and change the values of your's recurring configurations daily scan in the following link: - https://cloud.armosec.io/settings/assets/clusters/scheduled-scans?cluster=kind-kind + You can view your cluster's configuration scanning schedule using the following command: > kubectl -n kubescape get cj kubescape-scheduler -o=jsonpath='{.metadata.name}{"\t"}{.spec.schedule}{"\n"}' - You can see and change the values of your's recurring images daily scan in the following link: - https://cloud.armosec.io/settings/assets/images + To change the schedule, set the `.spec.schedule` key to the value you need with: + > kubectl -n kubescape edit cj kubescape-scheduler + + The image scanning schedule is available with the following command: > kubectl -n kubescape get cj kubevuln-scheduler -o=jsonpath='{.metadata.name}{"\t"}{.spec.schedule}{"\n"}' + To change the schedule, edit the `.spec.schedule` key with: + > kubectl -n kubescape edit cj kubevuln-scheduler + + + See you!!! 2: | apiVersion: batch/v1 @@ -176,7 +182,7 @@ matches the snapshot: apiVersion: v1 data: matchingRules.json: | - {"match":[{"apiGroups":["apps"],"apiVersions":["v1"],"resources":["deployments"]}],"namespaces":["kube-system","default"]} + {"match":[{"apiGroups":["apps"],"apiVersions":["v1"],"resources":["deployments"]}],"namespaces":["default"]} kind: ConfigMap metadata: labels: @@ -1966,7 +1972,7 @@ matches the snapshot: value: zap - name: OTEL_COLLECTOR_SVC value: otel-collector:4317 - image: vklokun/ks-operator-test:continuous-scanning + image: quay.io/kubescape/operator:v0.1.60 imagePullPolicy: IfNotPresent livenessProbe: httpGet: @@ -2595,8 +2601,8 @@ matches the snapshot: name: cloud-secret - name: OTEL_COLLECTOR_SVC value: otel-collector:4317 - image: vklokun/storage:v0.0.31-alreadyexists-revert - imagePullPolicy: Never + image: quay.io/kubescape/storage:v0.0.32 + imagePullPolicy: IfNotPresent name: apiserver resources: limits: