# example.yml
inputs:
  # - PlainFile:
  #     path:
  #       - /tmp/file/^file.*\.txt$
  #     heartbeatHost: 172.16.1.52:8854
  #     userToken: kRhisbdoTQMCZU5KqQqGkQ7sDA7BM9kpldnQ5Nf2al8ER9yp
  #     offsetDbSyncIntervalMs: 60000
  #     heartbeatIntevalMs: 60000
  - Kafka:
      codec: json
      encoding: UTF8 # default UTF8
      topic:
        {"dt_all_log": 1}
      consumerSettings:
        group.id: dt_all_log_group_hao
        zookeeper.connect: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka2
        auto.commit.interval.ms: "1000"
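      # In the topic map, each key is a Kafka topic name and the value is most
      # likely the number of consumer threads (the 0.8 high-level consumer's
      # topicCountMap, consistent with the zookeeper.connect setting above).
      # A sketch, assuming the plugin accepts several entries, of consuming a
      # second (hypothetical) topic with two threads:
      # topic:
      #   {"dt_all_log": 1, "dt_audit_log": 2}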
  # - KafkaReset:
  #     codec: json
  #     encoding: UTF8 # default UTF8
  #     minTime: "2017-08-11 20:33:33"
  #     maxTime: "2017-08-11 20:34:33"
  #     topic:
  #       {"dt_all_log": 3}
  #     consumerSettings:
  #       group.id: dt_all_log_group_hao
  #       zookeeper.connect: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka2
  #       auto.commit.interval.ms: "1000"
  # - Netty:
  #     port: 8635
  #     whiteListPath:
  #     codec: json
  #     isExtract: false
filters:
  - Performance:
      interval: 1
      path: /tmp/filter-performance-%{+YYYY.MM.dd}.txt
      timeZone: Asia/Shanghai
      monitorPath: {"/tmp/filter-performance-%{+YYYY.MM.dd}.txt":"8"}
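      # monitorPath uses the same map format as timeWasteConfig/parseFailedConfig
      # below: the key is the file to watch and the value is the number of
      # recent logs to retain, so "8" here keeps the last eight performance logs.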
  - dtstack.jdtloguserauth.DtLogUserAuth:
      apiServer: 172.16.1.52:8668
      useSsl: false
      redisHost: 172.16.1.52
      redisPort: 6379
      isRedis: true
      redisDB: 0
      redisPassword: taukwnlwrd9
  - dtstack.jdtlogparser.DtLogParser:
      apiServer: 172.16.1.52:82
      useSsl: false
      redisHost: 172.16.1.52
      redisPort: 6379
      isRedis: true
      redisDB: 0
      redisPassword: taukwnlwrd9
      timeWasteConfig: {"/tmp/timewaste-%{+yyyy.MM.dd}.log":"7"} # parse-timing log config, same format as Performance: the key is the dedicated log file path, the value is the number of recent logs to retain
      timeWasteLogMaxFreq: 100 # cap on the write-IO frequency of parse-timing logs, to avoid saturating the disk
      parsedTimeThreshold: 2
      parseFailedConfig: {"/tmp/parsefailed-%{+yyyy.MM.dd}.log":"7"} # parse-failure log config, same format as Performance: the key is the dedicated log file path, the value is the number of recent logs to retain
      parseFailedLogMaxFreq: 100 # cap on the write-IO frequency of parse-failure logs, to avoid saturating the disk
outputs:
  - Performance:
      interval: 1
      path: /tmp/beat-performance-%{+YYYY.MM.dd}.txt
      timeZone: Asia/Shanghai
      monitorPath: {"/tmp/beat-performance-%{+YYYY.MM.dd}.txt":"8"}
  - Odps:
      configs:
        redis.address: redis://:taukwnlwrd9@172.16.1.52:6379/1
        redis.max.idle: 100
        redis.max.total: 1024
        redis.max.wait.mills: 3000
        redis.timeout: 2000
        redis.map.info.key: od-ps-cfg
        redis.queue.info.key: od-ps-cfg-msg
        http.map.info.api: http://172.16.1.52:81/api/v1/odps/provide_task_list/
        task.thread.pool.size: 5000
        # task.thread.cycle.commit.time: 30000
        task.thread.commit.interval: 30000
        task.tunnel.timezone: Asia/Shanghai
        task.tunnel.retry.limit: 720 # retry count when a log commit fails
        task.tunnel.retry.interval: 5 # interval between commit retries, in seconds
        task.partitions.lru.size: 30000
        task.report.status.address: 172.16.1.52:81
        task.report.status.interval: 300000
        scala.kafka.producer.brokerlist: 172.16.1.145:9092
        scala.kafka.zookeeper: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka
        task.retry.kafka.groupid: odps_retry_event_group_test
        task.retry.kafka.topic: odps_retry_event_topic_test
  # - Elasticsearch5:
  #     hosts: ["172.16.1.145:9300"]
  #     indexTimezone: "UTC"
  #     cluster: poc_dtstack
  #     concurrentRequests: 1
  #     index: 'dtlog-%{tenant_id}-%{appname}-%{keeptype}-%{+YYYY.MM.dd}'
  #     errorEventLogConfig: {"/tmp/error-event-%{+YYYY.MM.dd}.txt":"3"}
  #     ERROR_PROTECT_KEYS: "@timestamp,appname,keeptype,logtype,tag,message,timestamp,local_ip,tenant_id,hostname,path,agent_type,offset,uuid,bajie_test"
  #     documentType: logs # default logs
  #     consistency: true # default false
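  # Note: jlogstash appears to follow logstash's sprintf convention, where
  # %{field} expands to an event field and %{+YYYY.MM.dd} formats the event
  # timestamp; under that assumption the index pattern above could expand to
  # something like dtlog-42-nginx-hot-2017.08.11 (field values illustrative).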

# inputs:
#   # - Beats:
#   #     codec: json
#   #     port: 8635
#   - Kafka:
#       codec: json
#       encoding: UTF8 # default UTF8
#       topic:
#         dt_all_log: 5
#       consumerSettings:
#         group.id: dt_all_log_group
#         zookeeper.connect: 172.16.1.181:2181,172.16.1.186:2181,172.16.1.226:2181/kafka
#         auto.commit.interval.ms: "1000"
# filters:
#   # - Performance:
#   #     path: /home/admin/jlogserver/logs/beat-filters-performance-%{+YYYY.MM.dd}.txt
#   #     timeZone: Asia/Shanghai
#   #     monitorPath: {"/home/admin/jlogserver/logs/beat-filters-performance-%{+YYYY.MM.dd}.txt":"8"}
#   - dtstack.jdtloguserauth.DtLogUserAuth:
#       apiServer: 172.16.1.52
#       useSsl: false
#       redisHost: 172.16.1.52
#       redisPort: 6379
#       isRedis: true
#       redisDB: 1
#       redisPassword: taukwnlwrd9
#   - Add:
#       fields: {"agent_type":"@metadata.beat","hostname":"beat.hostname","host":"beat.name"}
#   - Remove:
#       fields: ["@metadata","count","offset","beat"]
#   - Rename:
#       fields: {"source":"path"}
#   - dtstack.jdtlogparser.DtLogParser:
#       apiServer: 172.16.1.52:81
#       useSsl: false
#       redisHost: 172.16.1.52
#       redisPort: 6379
#       isRedis: true
#       redisDB: 0
#       redisPassword: taukwnlwrd9
#   - dtstack.jdtlogcreatemessage.DtLogCreateMessage:
#       repeatFields: ["path"]
#   - Flow:
#       configs:
#         flow.control.counttype: unset
#         flow.control.threshold: 10KB
#         flow.stat.counttype: unset
#         flow.stat.report.commit.delay.second: 3
#         flow.stat.report.interval: 1000
#         flow.stat.report.addr.template: "http://172.16.10.123:8854/api/logagent/test?uuid=%s&time=%s&bandwidth=%s"
# outputs:
#   # - Performance:
#   #     path: /home/admin/jlogserver/logs/beat-performance-%{+YYYY.MM.dd}.txt
#   #     timeZone: Asia/Shanghai
#   #     monitorPath: {"/tmp/output-performance-%{+YYYY.MM.dd}.txt":"8"}
#   - Netty:
#       host: 172.16.1.58
#       port: 8635
#       openCompression: true
#       compressionLevel: 6
#       openCollectIp: true
#       # format: ${HOSTNAME} ${appname} [${user_token} type=${logtype} tag=${logtag}] ${path} jlogstash/$${timestamp}/$${message}