#
# Reference KubeHound configuration
#

#
# K8s collector configuration
#
collector:
  # Type of collector to use
  type: live-k8s-api-collector

  # Live collector configuration
  live:
    # Rate limit of requests/second to the Kubernetes API
    # NOTE: most (>90%) of the current runtime of KubeHound is spent transferring data from the remote K8s API server,
    # and the bulk of that is spent waiting on the rate limit. As such, increasing this will improve performance
    # roughly linearly.
    rate_limit_per_second: 60
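
    # Example: raising the rate limit to speed up collection on a large cluster.
    # The value below is illustrative, not a tested recommendation; make sure the
    # API server can absorb the additional request load before raising it.
    # rate_limit_per_second: 120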

    # # Number of entries retrieved by each call on the API (same for all Kubernetes entry types)
    # page_size: 500

    # # Number of pages to buffer
    # page_buffer_size: 10

  # Uncomment to use the file collector
  # type: file-collector

  # File collector configuration
  # file:
  #   # Directory holding the K8s json data files
  #   directory: /path/to/directory
  #
  #   # Target cluster name
  #   cluster: <cluster name>
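
  # Example: the collector section assembled for an offline run against a
  # previous dump (directory and cluster name are placeholders to substitute):
  #
  # type: file-collector
  # file:
  #   directory: /tmp/kubehound-dump
  #   cluster: staging-cluster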

#
# General storage configuration
#
storage:
  # Whether or not to wipe all data on startup
  wipe: true

  # Number of connection retries before declaring an error
  retry: 5

  # Delay between connection retries
  retry_delay: 10s

  # Store database configuration
  mongodb:
    # Connection URL to the mongo DB instance
    url: "mongodb://localhost:27017"

    # Timeout on requests to the mongo DB instance
    connection_timeout: 30s

  # Graph database configuration
  janusgraph:
    # Connection URL to the JanusGraph DB instance
    url: "ws://localhost:8182/gremlin"

    # Timeout on requests to the JanusGraph DB instance
    connection_timeout: 30s
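
  # Example: pointing KubeHound at remote backends instead of the default local
  # containers, with more tolerant retry and timeout settings for a slower
  # network path (hostnames and values are illustrative):
  #
  # retry: 10
  # retry_delay: 30s
  # mongodb:
  #   url: "mongodb://mongo.internal.example.com:27017"
  #   connection_timeout: 60s
  # janusgraph:
  #   url: "ws://janusgraph.internal.example.com:8182/gremlin"
  #   connection_timeout: 60s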

#
# Datadog telemetry configuration
#
telemetry:
  # Whether to enable Datadog telemetry (default false)
  enabled: true

  # Default tags to add to all telemetry (free-form key-value map)
  # tags:
  #   team: ase

  # Statsd configuration for metrics support
  statsd:
    # URL to send statsd data to the Datadog agent
    url: "127.0.0.1:8225"

  # Tracer configuration for APM support
  tracer:
    # URL to send tracer data to the Datadog agent
    url: "127.0.0.1:8226"
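
  # Example: attaching custom default tags to every metric and trace emitted
  # (tag keys and values are illustrative; the map is free-form):
  #
  # tags:
  #   env: prod
  #   team: security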

#
# Graph builder configuration
#
# NOTE: increasing batch sizes can yield some performance improvement by reducing the network latency cost of
# transferring data between KubeGraph and the application. However, increasing them past a certain level can overload
# the backend, leading to instability, and can eventually exceed the size limits of the websocket buffer used to
# transfer the data. Changing these values is not recommended.
#
builder:
  # Vertex builder configuration
  # vertex:
  #   # Batch size for vertex inserts
  #   batch_size: 500
  #
  #   # Small batch size for vertex inserts
  #   batch_size_small: 100

  # Edge builder configuration
  edge:
    # Enable for large clusters to prevent the number of edges growing exponentially
    large_cluster_optimizations: true

    # # Size of the worker pool handling parallel edge inserts
    # # NOTE: this should only be changed if granting additional resources to the KubeGraph container
    # worker_pool_size: 5

    # # Capacity of the worker pool handling parallel edge inserts
    # # NOTE: this should only be changed in conjunction with the worker_pool_size
    # worker_pool_capacity: 100

    # # Batch size for edge inserts
    # batch_size: 500

    # # Small batch size for edge inserts
    # batch_size_small: 75

    # # Cluster impact batch size for edge inserts
    # batch_size_cluster_impact: 1
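
    # Example: on a small test cluster the optimizations can be disabled to
    # build the complete, unreduced edge set (keep this enabled on large
    # clusters, as the comment above advises):
    # large_cluster_optimizations: false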

# Ingestor configuration (for KHaaS)
# ingestor:
#   blob:
#     bucket: "" # (i.e.: s3://your-bucket)
#     region: "" # (i.e.: us-east-1)
#   temp_dir: "/tmp/kubehound"
#   archive_name: "archive.tar.gz"
#   max_archive_size: 1073741824 # 1GB
#   api: # gRPC endpoint for the ingestor
#     endpoint: "127.0.0.1:9000"
#     insecure: true
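
# Example KHaaS ingestor configuration assembled from the commented keys above
# (bucket, region, and endpoint are placeholders to substitute):
#
# ingestor:
#   blob:
#     bucket: "s3://kubehound-ingest"
#     region: "us-east-1"
#   api:
#     endpoint: "khaas.internal.example.com:9000"
#     insecure: false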