Commit 848b21a: Fix conflict
Peter Varkoly committed Dec 13, 2023
2 parents ec1a656 + a93308a
Showing 14 changed files with 100 additions and 161 deletions.
11 changes: 0 additions & 11 deletions aux/config.yml
@@ -122,17 +122,6 @@ cluster: !ruby/object:SapHA::Configuration::Cluster
- :@keys
- :@append_hosts
- :@host_passwords
cluster_finalizer: !ruby/object:SapHA::Configuration::ClusterFinalizer
global_config: *5
screen_name: Cluster Configuration Finalizer
exception_type: &6 !ruby/class 'SapHA::Exceptions::BaseConfigException'
yaml_exclude:
- :@nlog
instance_variables:
- :@global_config
- :@screen_name
- :@exception_type
- :@yaml_exclude
fencing: !ruby/object:SapHA::Configuration::Fencing
global_config: *5
screen_name: Fencing Mechanism
11 changes: 0 additions & 11 deletions aux/config_prd.yml
@@ -121,17 +121,6 @@ cluster: !ruby/object:SapHA::Configuration::Cluster
- :@enable_csync2
- :@keys
- :@append_hosts
cluster_finalizer: !ruby/object:SapHA::Configuration::ClusterFinalizer
global_config: *5
screen_name: Cluster Configuration Finalizer
exception_type: &6 !ruby/class 'SapHA::Exceptions::BaseConfigException'
yaml_exclude:
- :@nlog
instance_variables:
- :@global_config
- :@screen_name
- :@exception_type
- :@yaml_exclude
fencing: !ruby/object:SapHA::Configuration::Fencing
global_config: *5
screen_name: Fencing Mechanism
11 changes: 0 additions & 11 deletions aux/config_prd_sps03.yml
@@ -122,17 +122,6 @@ cluster: !ruby/object:SapHA::Configuration::Cluster
- :@enable_csync2
- :@keys
- :@append_hosts
cluster_finalizer: !ruby/object:SapHA::Configuration::ClusterFinalizer
global_config: *5
screen_name: Cluster Configuration Finalizer
exception_type: &6 !ruby/class 'SapHA::Exceptions::BaseConfigException'
yaml_exclude:
- :@nlog
instance_variables:
- :@global_config
- :@screen_name
- :@exception_type
- :@yaml_exclude
fencing: !ruby/object:SapHA::Configuration::Fencing
global_config: *5
screen_name: Fencing Mechanism
15 changes: 15 additions & 0 deletions aux/is_hana_running.sh
@@ -0,0 +1,15 @@
#!/bin/bash
#

while true
do
if [ "$( /usr/sap/hostctrl/exe/sapcontrol -nr 00 -function GetProcessList | grep hdbindexserver )" ]; then
echo -n "RUN " >> /var/log/hana-state
date >> /var/log/hana-state
else
echo -n "NOT " >> /var/log/hana-state
date >> /var/log/hana-state
fi
sleep 1
done

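For reference, this helper polls sapcontrol once a second and appends a timestamped status line to /var/log/hana-state. With the instance number 00 hard-coded in the script, the log accumulates entries like the following (timestamps illustrative):

    RUN Wed Dec 13 10:15:01 UTC 2023
    RUN Wed Dec 13 10:15:02 UTC 2023
    NOT Wed Dec 13 10:21:47 UTC 2023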
12 changes: 12 additions & 0 deletions package/yast2-sap-ha.changes
@@ -1,3 +1,15 @@
-------------------------------------------------------------------
Wed Nov 29 07:52:36 UTC 2023 - Peter Varkoly <[email protected]>

- yast2-sap-ha setup workflow is bad (bsc#1217596)
Reworking the workflow:
1. Setting up SAP HANA System Replication
2. Setting up SAP HANA HA/DR providers
3. Configuring the base cluster on all nodes
4. Configuring cluster properties and resources with the new function HANA.finalize
The whole ClusterFinalizer class was removed.
- 4.6.2

-------------------------------------------------------------------
Thu Nov 9 08:31:53 UTC 2023 - Peter Varkoly <[email protected]>

2 changes: 1 addition & 1 deletion package/yast2-sap-ha.spec
@@ -17,7 +17,7 @@


Name: yast2-sap-ha
Version: 4.6.1
Version: 4.6.2
Release: 0
BuildArch: noarch
Source0: %{name}-%{version}.tar.bz2
2 changes: 1 addition & 1 deletion src/data/sap_ha/scenarios.yaml
@@ -12,8 +12,8 @@
- ntp
- watchdog
- fencing
- cluster
- hana
- cluster
screen_sequence: &id002
- prerequisites
- communication_layer
2 changes: 1 addition & 1 deletion src/data/sap_ha/tmpl_cluster_config.erb
@@ -40,7 +40,7 @@ primitive rsc_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> ocf:suse:SAPHana \
meta priority="100"

ms msl_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> rsc_SAPHana_<%= @system_id -%>_HDB<%= @instance -%> \
meta clone-max="2" clone-node-max="1" interleave="true"
meta clone-max="2" clone-node-max="1" interleave="true" maintenance="true"

primitive rsc_ip_<%= @system_id -%>_HDB<%= @instance -%> ocf:heartbeat:IPaddr2 \
op monitor interval="10" timeout="20" \
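The template change above creates the SAPHana multi-state resource with maintenance="true", so the cluster does not start managing it immediately. Rendered with a hypothetical SID HA1 and instance number 00, the changed line becomes:

    ms msl_SAPHana_HA1_HDB00 rsc_SAPHana_HA1_HDB00 \
        meta clone-max="2" clone-node-max="1" interleave="true" maintenance="true"

The resource is taken out of maintenance later by the new activating_msr step in hana.rb (see below), once all nodes are configured and system replication is running.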
3 changes: 0 additions & 3 deletions src/lib/sap_ha/configuration.rb
@@ -26,7 +26,6 @@
require "sap_ha/helpers"
require "sap_ha/node_logger"
require "sap_ha/configuration/cluster"
require "sap_ha/configuration/cluster_finalizer"
require "sap_ha/configuration/fencing"
require "sap_ha/configuration/watchdog"
require "sap_ha/configuration/hana"
@@ -53,7 +52,6 @@ class HAConfiguration
:watchdog,
:hana,
:ntp,
:cluster_finalizer,
:imported,
:unattended,
:completed,
@@ -80,7 +78,6 @@ def initialize(role = :master)
@scenario_summary = nil
@yaml_configuration = load_scenarios
@cluster = Configuration::Cluster.new(self)
@cluster_finalizer = Configuration::ClusterFinalizer.new(self)
@fencing = Configuration::Fencing.new(self)
@watchdog = Configuration::Watchdog.new(self)
@hana = Configuration::HANA.new(self)
15 changes: 6 additions & 9 deletions src/lib/sap_ha/configuration/cluster.rb
@@ -287,12 +287,12 @@ def other_nodes_ext

def get_primary_on_primary
SapHA::System::Network.ip_addresses.each do |my_ip|
@nodes.each do |_, node|
if node[:ip_ring1] == my_ip
return node[:host_name]
end
end
end
@nodes.each do |_, node|
if node[:ip_ring1] == my_ip
return node[:host_name]
end
end
end
return nil
end

@@ -381,10 +381,7 @@ def apply(role)
@nlog.log_status(status, "Exported configuration for yast2-cluster",
"Could not export configuration for yast2-cluster")
flag &= status
#Handle firewall
SapHA::System::Local.config_firewall(@fw_config, role)
flag &= SapHA::System::Local.start_cluster_services
flag &= SapHA::System::Local.cluster_maintenance(:on) if role == :master
flag &= SapHA::System::Local.add_stonith_resource if role == :master
flag
end
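A standalone sketch of the logic in the reworked get_primary_on_primary above (hypothetical data; the real method iterates SapHA::System::Network.ip_addresses and the configured @nodes hash):

    # Returns the host name of the node whose ring-1 IP matches one of this
    # host's own addresses, or nil if none matches.
    def primary_on_primary(nodes, my_ips)
      my_ips.each do |my_ip|
        nodes.each_value do |node|
          return node[:host_name] if node[:ip_ring1] == my_ip
        end
      end
      nil
    end

    nodes = {
      node1: { host_name: "hana01", ip_ring1: "192.168.100.10" },
      node2: { host_name: "hana02", ip_ring1: "192.168.100.11" },
    }
    puts primary_on_primary(nodes, ["192.168.100.10"])  # => hana01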
59 changes: 0 additions & 59 deletions src/lib/sap_ha/configuration/cluster_finalizer.rb

This file was deleted.

85 changes: 63 additions & 22 deletions src/lib/sap_ha/configuration/hana.rb
@@ -95,7 +95,7 @@ def initialize(global_config)
def additional_instance=(value)
@additional_instance = value
return unless value
@prefer_takeover = false
@prefer_takeover = false
@production_constraints = {
global_alloc_limit_prod: "0",
global_alloc_limit_non: "0",
@@ -215,7 +215,7 @@ def non_production_constraints_validation(check, hash)
def apply(role)
return false unless configured?
@nlog.info("Applying HANA Configuration")
config_firewall(role)
configure_firewall(role)
if role == :master
if @perform_backup
SapHA::System::Hana.make_backup(@system_id, @backup_user, @backup_file, @instance)
@@ -224,7 +224,6 @@
secondary_password = @global_config.cluster.host_passwords[secondary_host_name]
SapHA::System::Hana.copy_ssfs_keys(@system_id, secondary_host_name, secondary_password)
SapHA::System::Hana.enable_primary(@system_id, @site_name_1)
configure_crm
else # secondary node
SapHA::System::Hana.hdb_stop(@system_id)
primary_host_name = @global_config.cluster.other_nodes_ext.first[:hostname]
@@ -238,20 +237,15 @@
true
end

def cleanup_hana_resources
# @FIXME: Workaround for Azure-specific issue that needs investigation
# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability
if @global_config.platform == "azure"
rsc = "rsc_SAPHana_#{@system_id}_HDB#{@instance}"
cleanup_status = exec_status("crm", "resource", "cleanup", rsc)
@nlog.log_status(cleanup_status.exitstatus == 0,
"Performed resource cleanup for #{rsc}",
"Could not clean up #{rsc}")
end
def finalize
configure_crm
wait_idle(@global_config.cluster.get_primary_on_primary)
activating_msr
end

private

def configure_crm
# TODO: move this to SapHA::System::Local.configure_crm
primary_host_name = @global_config.cluster.get_primary_on_primary
secondary_host_name = @global_config.cluster.other_nodes_ext.first[:hostname]
crm_conf = Helpers.render_template("tmpl_cluster_config.erb", binding)
@@ -262,10 +256,55 @@ def configure_crm
"Could not configure HANA cluster resources", out)
end

def config_firewall(role)
# Wait until the node reaches state S_IDLE, at most about 60 seconds
def wait_idle(node)
counter = 0
while true
out, status = exec_outerr_status("crmadmin", "--quiet", "--status", node)
break if out == "S_IDLE"
log.info("wait_idle status of #{node} is #{out}")
counter += 1
break if counter > 10
sleep 6
end
end

def activating_msr
msr = "msl_SAPHana_#{@system_id}_HDB#{@instance}"
out, status = exec_outerr_status("crm", "resource", "refresh", msr)
@nlog.log_status(status.exitstatus == 0,
"#{msr} status refresh OK",
"Could not refresh status of #{msr}: #{out}")
out, status = exec_outerr_status("crm", "resource", "maintenance", msr, "off")
@nlog.log_status(status.exitstatus == 0,
"#{msr} maintenance turned off.",
"Could turn off maintenance on #{msr}: #{out}")
end

def cleanup_hana_resources
# @FIXME: Workaround for Azure-specific issue that needs investigation
# https://docs.microsoft.com/en-us/azure/virtual-machines/workloads/sap/sap-hana-high-availability
if @global_config.platform == "azure"
rsc = "rsc_SAPHana_#{@system_id}_HDB#{@instance}"
cleanup_status = exec_status("crm", "resource", "cleanup", rsc)
@nlog.log_status(cleanup_status.exitstatus == 0,
"Performed resource cleanup for #{rsc}",
"Could not clean up #{rsc}")
end
end

# Adapt the firewall depending on @global_config.cluster.fw_config.
# Even if the firewall is already configured, TCP port 8080 is opened for internal RPC communication during setup.
# If the firewall should be stopped during configuration, no other action is necessary.
# If the firewall should be configured in the first step, the HANA services will be generated by hana-firewall.
# After that, the generated services and the "cluster" service will be added to the default zone.
def configure_firewall(role)
case @global_config.cluster.fw_config
when "done"
@nlog.info("Firewall is already configured")
if role != :master
_s = exec_status("/usr/bin/firewall-cmd", "--add-port", "8080/tcp")
end
when "off"
@nlog.info("Firewall will be turned off")
SapHA::System::Local.systemd_unit(:stop, :service, "firewalld")
@@ -280,6 +319,8 @@ def config_firewall(role)
if role != :master
_s = exec_status("/usr/bin/firewall-cmd", "--add-port", "8080/tcp")
end
_s = exec_status("/usr/bin/firewall-cmd", "--add-service", "cluster")
_s = exec_status("/usr/bin/firewall-cmd", "--permanent", "--add-service", "cluster")
HANA_FW_SERVICES.each do |service|
_s = exec_status("/usr/bin/firewall-cmd", "--add-service", service)
_s = exec_status("/usr/bin/firewall-cmd", "--permanent", "--add-service", service)
@@ -291,9 +332,9 @@

# Creates the sudoers file
def adapt_sudoers
if File.exist?(SapHA::Helpers.data_file_path("SUDOERS_HANASR.erb"))
Helpers.write_file("/etc/sudoers.d/saphanasr.conf",Helpers.render_template("SUDOERS_HANASR.erb", binding))
end
if File.exist?(SapHA::Helpers.data_file_path("SUDOERS_HANASR.erb"))
Helpers.write_file("/etc/sudoers.d/saphanasr.conf",Helpers.render_template("SUDOERS_HANASR.erb", binding))
end
end

# Activates all necessary plugins based on role and scenario
@@ -305,14 +346,14 @@ def adjust_global_ini(role)
add_plugin_to_global_ini("SUS_COSTOPT", @system_id) if role != :master
add_plugin_to_global_ini("NON_PROD", @np_system_id) if role != :master
command = ["hdbnsutil", "-reloadHADRProviders"]
out, status = su_exec_outerr_status("#{@np_system_id.downcase}adm", *command)
_out, _status = su_exec_outerr_status("#{@np_system_id.downcase}adm", *command)
else
# performance optimized
add_plugin_to_global_ini("SUS_CHKSRV", @system_id)
add_plugin_to_global_ini("SUS_TKOVER", @system_id)
end
command = ["hdbnsutil", "-reloadHADRProviders"]
out, status = su_exec_outerr_status("#{@system_id.downcase}adm", *command)
_out, _status = su_exec_outerr_status("#{@system_id.downcase}adm", *command)
end

# Activates the plugin in global ini
@@ -321,8 +362,8 @@ def add_plugin_to_global_ini(plugin, sid)
if File.exist?("#{sr_path}.erb")
sr_path = Helpers.write_var_file(plugin, Helpers.render_template("GLOBAL_INI_#{plugin}.erb", binding))
end
command = ["/usr/sbin/SAPHanaSR-manageProvider", "--add", "--sid", sid, sr_path]
out, status = su_exec_outerr_status("#{sid.downcase}adm", *command)
command = ["/usr/sbin/SAPHanaSR-manageProvider", "--add", "--reconfigure", "--sid", sid, sr_path]
_out, _status = su_exec_outerr_status("#{sid.downcase}adm", *command)
end
end
end
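The new finalize sequence (configure_crm, then wait_idle, then activating_msr) boils down to a handful of crm shell commands. A minimal standalone Ruby sketch of the wait-and-activate part, assuming crmadmin/crm are on the PATH and using a hypothetical SID HA1, instance 00, and primary node hana01:

    require "open3"

    msr = "msl_SAPHana_HA1_HDB00"

    # wait_idle: poll the node state until it reports S_IDLE, at most ~60 seconds
    10.times do
      out, _status = Open3.capture2e("crmadmin", "--quiet", "--status", "hana01")
      break if out.strip == "S_IDLE"
      sleep 6
    end

    # activating_msr: refresh the multi-state resource, then end maintenance
    # mode so the cluster starts managing SAP HANA
    Open3.capture2e("crm", "resource", "refresh", msr)
    Open3.capture2e("crm", "resource", "maintenance", msr, "off")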
2 changes: 1 addition & 1 deletion src/lib/sap_ha/sap_ha_installation.rb
@@ -57,7 +57,7 @@ def run
next_node
log.info "--- #{self.class}.#{__callee__}: finished configuring node #{node[:hostname]} ---"
end
@config.cluster_finalizer.apply(:master)
@config.hana.finalize
@ui.unblock if @ui
NodeLogger.summary
:next