cluster:
  # Cluster name
  name: hacluster
  # Node that will create the cluster.
  # To create multiple clusters, set different
  # pillar data with a different node as the
  # init node for each one. Each init node will
  # create a separate cluster.
  init: minion-1
  # optional: nodes that should be removed
  # from the cluster.
  # remove: []
  # optional: Configure the watchdog device. Both module and device are mandatory
  # watchdog:
  #   module: softdog
  #   device: /dev/watchdog
  # optional: Network interface to use for cluster communication
  # interface: eth0
  # optional: Use unicast UDP instead of multicast
  # unicast: true
  # optional: Time in seconds to wait between the hawk service becoming
  # available on the first node and the join command being executed
  # (20s by default)
  # wait_for_initialization: 20
  # optional: Timeout in seconds to join the cluster
  # join_timeout: 60
  # optional: Configure a virtual IP resource in the cluster (see the sketch below)
  # admin_ip: 10.20.30.40
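  # For illustration only: a virtual IP like the one above typically ends up as an
  # IPaddr2 primitive in the cluster configuration, roughly equivalent to the crmsh
  # snippet below. The resource name and monitor settings here are assumptions, not
  # necessarily what the formula generates.
  #
  #   primitive admin-ip ocf:heartbeat:IPaddr2 \
  #     params ip=10.20.30.40 \
  #     op monitor interval=10 timeout=20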
  # optional: Configure SBD
  # Diskless SBD and using a disk are mutually exclusive options
  # sbd:
  #   # Configure diskless SBD
  #   device: False
  #   # Configure an SBD device
  #   device: /dev/by-label/sbd-disk
  #   # multiple disks can be set using a list
  #   device:
  #     - /dev/by-label/sbd-disk1
  #     - /dev/by-label/sbd-disk2
  #   # optional: Configure the sbd cluster resource (see the sketch below). This updates the stonith-sbd resource created when sbd is enabled
  #   # Warning: this sbd configuration will be overwritten if the resource is also defined in the configure template later
  #   configure_resource:
  #     # force: False  # Force the commit. Use with caution, as this can allow invalid parameters into the configuration
  #     params:
  #       pcmk_delay_max: 15
  #     op:
  #       monitor:
  #         timeout: 15
  #         interval: 15
  #     meta:
  #       resource-stickiness: 3000
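  # For illustration only: with the configure_resource example above, the updated
  # stonith-sbd resource would look roughly like the crmsh snippet below. This is a
  # sketch of the expected outcome, not a literal dump produced by the formula.
  #
  #   primitive stonith-sbd stonith:external/sbd \
  #     params pcmk_delay_max=15 \
  #     op monitor interval=15 timeout=15 \
  #     meta resource-stickiness=3000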
  # optional: Configure OCFS2 device
  # ocfs2:
  #   device: /dev/by-label/ocfs2-disk
  #   mount: /srv/cluster_ocfs2
  # optional: Configure qdevice
  # qdevice:
  #   qnetd_hostname: hostname
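  # For illustration only: a quorum device configured this way usually appears in
  # corosync.conf as a device section similar to the sketch below; the algorithm and
  # exact keys are assumptions and may differ from what the formula writes.
  #
  #   quorum {
  #     provider: corosync_votequorum
  #     device {
  #       model: net
  #       net {
  #         host: hostname
  #         algorithm: ffsplit
  #       }
  #     }
  #   }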
  # optional: Install the rpm packages required to install SAP Netweaver (true by default).
  # Pre-configured packages sometimes exist for development purposes.
  # If set to false, these packages must be installed manually before the formula is executed.
  # install_packages: true
  # optional: ntp server
  # ntp: pool.ntp.org
  # optional: enable monitoring via ha_cluster_exporter (disabled by default)
  # the exporter will be installed and configured on all of the nodes
  monitoring_enabled: true
  # optional: corosync configuration. If the entry is not set, the values set by crmsh will be used.
  # To configure the values, just create a dictionary with the values that have to be changed.
  # If a value already exists it will be changed, otherwise it will be added.
  # Warning: the code doesn't check whether the values make sense, it simply puts them in the configuration file.
  # Here is an example (the resulting corosync.conf excerpt is sketched below it):
  corosync:
    totem:
      token: 30000
      interface:
        bindnetaddr: 192.168.100.1
    quorum:
      expected_votes: 3
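  # For illustration only: the dictionary above maps onto corosync.conf roughly as
  # follows. This sketch is shown just to make the nesting explicit.
  #
  #   totem {
  #     token: 30000
  #     interface {
  #       bindnetaddr: 192.168.100.1
  #     }
  #   }
  #   quorum {
  #     expected_votes: 3
  #   }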
  # optional: update the hacluster password
  # hacluster_password: mypassword
  # optional: Manage ssh key usage
  # If your nodes don't have ssh keys, put the root password here and the system
  # will create the keys for you. If ssh keys already exist, you can omit this parameter.
  # (In previous versions the keys were sometimes overwritten. That no longer happens.)
  # sshkeys:
  #   # First node root password. This entry is used to configure the authorized_keys file from the joining nodes
  #   password: admin  # not set by default
  # optional: Resource agent packages to install
  # resource_agents:
  #   - resource_agent_pkg
  # optional: Configure cluster resource agents and constraints
  # configure:
  #   # optional, configure cluster properties and defaults. Find more information by running man pacemaker-schedulerd
  #   # add key/value entries for each specific configuration set (a crmsh sketch of the equivalent commands follows this block)
  #   properties:
  #     stonith-timeout: 144s
  #     stonith-enabled: true
  #   rsc_defaults:
  #     resource-stickiness: 1000
  #     migration-threshold: 5000
  #   op_defaults:
  #     timeout: 600
  #     record-pending: true
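  #   # For illustration only: the properties and defaults above correspond roughly to
  #   # the crmsh commands below; they are shown only to clarify what the entries mean,
  #   # the formula applies them through its own configuration step.
  #   #
  #   #   crm configure property stonith-timeout=144s stonith-enabled=true
  #   #   crm configure rsc_defaults resource-stickiness=1000 migration-threshold=5000
  #   #   crm configure op_defaults timeout=600 record-pending=true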
  #   # optional, url or template file path used to configure the cluster with a cluster configuration file
  #   # (a sketch of such a file follows this block)
  #   # Warning: values for properties, rsc_defaults or op_defaults in the configuration file take
  #   # precedence over the entries above in this pillar file, so those entries will be overwritten
  #   url: path_to_configfile
  #   method: update  # update by default
  #   is_xml: False  # False by default
  #   force: False  # Force the commit. Use with caution, as this can allow invalid parameters into the configuration. False by default
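  #   # For illustration only: a minimal cluster configuration file referenced by url
  #   # could look like the crmsh snippet below; the resource name and agent are
  #   # assumptions, not part of the formula.
  #   #
  #   #   primitive example-ip ocf:heartbeat:IPaddr2 \
  #   #     params ip=192.168.100.50 \
  #   #     op monitor interval=10 timeout=20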
  #   # optional, a jinja2 template can be used to create the configuration file
  #   # (see the template sketch after the parameter examples below)
  #   template:
  #     source: path_to_template
  #     # optional
  #     destination: path_to_destination_file
  #     # optional: parameters to pass to the template
  #     # to use them, the template must start with: {% set data = pillar.cluster.configure.template.parameters %}
  #     parameters:
  #       param1: value1
  #       param2: value2
  #       # examples to use native fencing
  #       ## on azure
  #       azure_subscription_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
  #       azure_resource_group_name: my-resource-group
  #       azure_tenant_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
  #       azure_fence_agent_app_id: XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
  #       azure_fence_agent_client_secret: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
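  #   # For illustration only: a template referenced by source above could look like
  #   # the sketch below, rendering a crmsh configuration file from the parameters;
  #   # the resource and parameter names are assumptions.
  #   #
  #   #   {% set data = pillar.cluster.configure.template.parameters %}
  #   #   primitive example-ip ocf:heartbeat:IPaddr2 \
  #   #     params ip={{ data.param1 }} \
  #   #     op monitor interval=10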
# -*- mode: yaml -*-
# vim: ft=yaml