From 1bf86d39767aa29df76bb834bcac22f16930349d Mon Sep 17 00:00:00 2001
From: naveena-maplelabs
Date: Thu, 1 Feb 2024 07:28:56 -0800
Subject: [PATCH] Added sample scripts using V2 Client

---
 samples/helios/__init__.py                   |   0
 samples/helios/helios_client.py              |  55 +++++++
 samples/helios/library.py                    |  51 +++++++
 samples/helios/physical_workflow_using_v2.py | 142 +++++++++++++++++
 samples/helios/view_workflow_using_v2.py     |  72 +++++++++
 samples/helios/vmware_workflow_using_v2.py   | 151 +++++++++++++++++++
 6 files changed, 471 insertions(+)
 create mode 100644 samples/helios/__init__.py
 create mode 100644 samples/helios/helios_client.py
 create mode 100644 samples/helios/library.py
 create mode 100644 samples/helios/physical_workflow_using_v2.py
 create mode 100644 samples/helios/view_workflow_using_v2.py
 create mode 100644 samples/helios/vmware_workflow_using_v2.py

diff --git a/samples/helios/__init__.py b/samples/helios/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/samples/helios/helios_client.py b/samples/helios/helios_client.py
new file mode 100644
index 00000000..1694e2f2
--- /dev/null
+++ b/samples/helios/helios_client.py
@@ -0,0 +1,55 @@
+# Third-party imports.
+from cohesity_sdk.cluster.cluster_client import ClusterClient
+from cohesity_sdk.helios_mcm.v1.mcm_v1_client import McmV1Client
+from cohesity_sdk.helios_mcm.v2.mcm_v2_client import McmV2Client
+
+from config import (
+    helios_ip,
+    helios_api_key,
+    cluster_vip,
+    cluster_username,
+    cluster_password,
+    cluster_domain,
+)
+
+
+def helios_connector():
+    """
+    Create a V1 Helios client.
+    :return: client
+    """
+    client = McmV1Client(cluster_vip=helios_ip, api_key=helios_api_key)
+    return client
+
+
+def helios_v2_connector():
+    """
+    Create a V2 Helios client.
+    :return: client
+    """
+    client = McmV2Client(
+        cluster_vip=helios_ip,
+        api_key=helios_api_key,
+    )
+    return client
+
+
+def helios_v2_connector_with_cluster_id():
+    """
+    Create a V2 Helios client with the cluster Id set in the request headers.
+    :return: client
+    """
+    # Fetch the cluster Id.
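+    # The direct cluster connection is used only for this lookup; all
+    # subsequent calls go through Helios with the Id in the headers.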
+    cluster_client = ClusterClient(
+        cluster_vip=cluster_vip,
+        username=cluster_username,
+        password=cluster_password,
+        domain=cluster_domain,
+    )
+    cluster_id = cluster_client.platform.get_cluster().id
+    client = McmV2Client(
+        cluster_vip=helios_ip,
+        api_key=helios_api_key,
+        access_cluster_id=cluster_id,
+    )
+    return client
diff --git a/samples/helios/library.py b/samples/helios/library.py
new file mode 100644
index 00000000..d03c979b
--- /dev/null
+++ b/samples/helios/library.py
@@ -0,0 +1,51 @@
+from cohesity_sdk.helios_mcm.v2.model.source_registration_request_params import (
+    SourceRegistrationRequestParams,
+)
+from cohesity_sdk.helios_mcm.v2.model.vmware_source_registration_params import (
+    VmwareSourceRegistrationParams,
+)
+from cohesity_sdk.helios_mcm.v2.model.vcenter_registration_params import (
+    VcenterRegistrationParams,
+)
+from cohesity_sdk.helios_mcm.v2.model.physical_source_registration_params import (
+    PhysicalSourceRegistrationParams,
+)
+
+
+def register_vmware_source(client, source, username, password):
+    """
+    Register a VMware source.
+    :return: source Id
+    """
+    payload = SourceRegistrationRequestParams(
+        environment="kVMware",
+        vmware_params=VmwareSourceRegistrationParams(
+            type="kVCenter",
+            v_center_params=VcenterRegistrationParams(
+                username=username, password=password, endpoint=source
+            ),
+        ),
+    )
+    response = client.source.register_protection_source(payload)
+    return response.id
+
+
+def register_physical_source(client, source):
+    """
+    Register a physical source.
+    :return: source Id
+    """
+    payload = SourceRegistrationRequestParams(
+        environment="kPhysical",
+        physical_params=PhysicalSourceRegistrationParams(
+            endpoint=source,
+            force_register=True,
+            physical_type="kHost",
+            host_type="kLinux",
+        ),
+    )
+    response = client.source.register_protection_source(payload)
+    return response.id
diff --git a/samples/helios/physical_workflow_using_v2.py b/samples/helios/physical_workflow_using_v2.py
new file mode 100644
index 00000000..074b99f4
--- /dev/null
+++ b/samples/helios/physical_workflow_using_v2.py
@@ -0,0 +1,142 @@
+import time
+import random
+
+from cohesity_sdk.helios_mcm.v2.model.create_or_update_protection_group_request import (
+    CreateOrUpdateProtectionGroupRequest,
+)
+from cohesity_sdk.helios_mcm.v2.model.create_protection_group_run_request import (
+    CreateProtectionGroupRunRequest,
+)
+from cohesity_sdk.helios_mcm.v2.model.physical_protection_group_params import (
+    PhysicalProtectionGroupParams,
+)
+from cohesity_sdk.helios_mcm.v2.model.physical_volume_protection_group_object_params import (
+    PhysicalVolumeProtectionGroupObjectParams,
+)
+from cohesity_sdk.helios_mcm.v2.model.physical_volume_protection_group_params import (
+    PhysicalVolumeProtectionGroupParams,
+)
+from config import physical_server
+from library import register_physical_source
+from helios_client import helios_v2_connector_with_cluster_id
+
+# Initialise a client.
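+# Requests are forwarded by Helios to the selected cluster via the
+# accessClusterId header that the connector sets.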
+client = helios_v2_connector_with_cluster_id()
+job_name = "PhysicalJob_" + str(random.randint(1, 100))
+
+
+def get_storage_domain_id():
+    domains = client.storage_domain.get_storage_domains().storage_domains or []
+    for domain in domains:
+        if domain.name == "DefaultStorageDomain":
+            return domain.id
+    return -1
+
+
+def get_policy_id():
+    policies = client.policy.get_protection_policies().policies or []
+    for policy in policies:
+        if policy.name == "Bronze":
+            return policy.id
+    return -1
+
+
+def create_job(job_name):
+    try:
+        source_id = None
+        jobs = client.protection_group.get_protection_groups(is_deleted=False)[
+            "protection_groups"
+        ]
+        # Check whether the job already exists.
+        for job in jobs:
+            if job["name"] == job_name:
+                print("Job %s is already available, skipping creation" % job_name)
+                return job
+
+        # Get the storage domain Id.
+        print("Fetching storage domains")
+        domain_id = get_storage_domain_id()
+        if domain_id == -1:
+            raise Exception("Storage Domain not available.")
+
+        # Check if the source is already registered.
+        sources = (
+            client.source.mcm_get_protection_sources(environments=["kPhysical"]).sources
+            or []
+        )
+        for source in sources:
+            if source.name == physical_server:
+                source_id = source.source_info_list[0].source_id
+        if not source_id:
+            source_id = register_physical_source(client, physical_server)
+        print("Source Id %s" % str(source_id))
+        body = CreateOrUpdateProtectionGroupRequest(
+            storage_domain_id=domain_id,
+            policy_id=get_policy_id(),
+            name=job_name,
+            environment="kPhysical",
+            physical_params=PhysicalProtectionGroupParams(
+                protection_type="kVolume",
+                volume_protection_type_params=PhysicalVolumeProtectionGroupParams(
+                    objects=[PhysicalVolumeProtectionGroupObjectParams(id=source_id)]
+                ),
+            ),
+        )
+        resp = client.protection_group.create_protection_group(body)
+        return resp
+    except Exception as err:
+        print(err)
+        exit()
+
+
+def run_job(job_id):
+    # Check for an active job run and trigger one if needed.
+    runs = client.protection_group.get_protection_group_runs(job_id)["runs"]
+    if len(runs) == 0 or runs[0]["local_backup_info"]["status"] not in [
+        "Running",
+        "Accepted",
+    ]:
+        # Trigger a job run.
+        body = CreateProtectionGroupRunRequest(run_type="kRegular")
+        client.protection_group.create_protection_group_run(job_id, body)
+
+    # Poll until the run finishes, for at most 15 attempts.
+    count = 15
+    while count != 0:
+        runs = client.protection_group.get_protection_group_runs(job_id)["runs"]
+        count -= 1
+        if not runs:
+            time.sleep(10)
+            continue
+        if runs[0]["local_backup_info"]["status"] in ["Running", "Accepted"]:
+            time.sleep(30)
+        else:
+            return True
+    return False
+
+
+if __name__ == "__main__":
+    print("Creating job")
+    job = create_job(job_name)
+    job_id = job.id
+    print("Job Content", job)
+    print("Updating job with id %s" % job_id)
+    resp = client.protection_group.update_protection_group(job_id, job)
+    print("Trigger job run")
+    run = run_job(job_id)
+    if run:
+        objects = client.search.search_protected_objects(protection_group_ids=[job_id])[
+            "objects"
+        ]
+        snapshot_id = (
+            objects[0].latest_snapshots_info[0].local_snapshot_info["snapshotId"]
+        )
+        print("Latest snapshot id: %s" % snapshot_id)
+
+    # Delete the job.
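+    # delete_snapshots=True also removes any snapshots captured by the run above.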
+ print("Deleting created job '%s'" % job_name) + client.protection_group.delete_protection_group(job_id, delete_snapshots=True) + print("Successfully deleted job '%s'" % job_name) diff --git a/samples/helios/view_workflow_using_v2.py b/samples/helios/view_workflow_using_v2.py new file mode 100644 index 00000000..ffc7b931 --- /dev/null +++ b/samples/helios/view_workflow_using_v2.py @@ -0,0 +1,72 @@ +import random + +from cohesity_sdk.helios_mcm.v2.model.create_or_update_protection_group_request import ( + CreateOrUpdateProtectionGroupRequest, +) + +from cohesity_sdk.helios_mcm.v2.model.create_view_request import CreateViewRequest +from cohesity_sdk.helios_mcm.v2.model.view_protection_group_params import ( + ViewProtectionGroupParams, +) +from cohesity_sdk.helios_mcm.v2.model.view_protection_group_object_params import ( + ViewProtectionGroupObjectParams, +) +from cohesity_sdk.helios_mcm.v2.model.view_protocol import ViewProtocol +from cohesity_sdk.helios_mcm.v2.model.qo_s import QoS + +from helios_client import helios_v2_connector_with_cluster_id + +# Initialise a client. +client = helios_v2_connector_with_cluster_id() +name = "ViewJob_" + str(random.randint(1, 100)) +policy_id = client.policy.get_protection_policies().policies[0].id +domain_id = client.storage_domain.get_storage_domains().storage_domains[0].id + + +def create_view(): + """ + Function to create a view. + """ + try: + protocol_access = ViewProtocol(type="NFS", mode="ReadWrite") + payload = CreateViewRequest( + storage_domain_id=domain_id, + name="View_" + str(random.randint(1, 100)), + category="FileServices", + protocol_access=[protocol_access], + qos={}, + ) + response = client.view.create_view(payload) + print(response) + return response.view_id + except Exception as err: + print(err) + + +try: + # Create a view, protect the view. + views = client.view.get_views().views + if not views: + view_id = create_view() + else: + view_id = views[0].view_id + view_id = create_view() + payload = CreateOrUpdateProtectionGroupRequest( + environment="kView", + name=name, + policy_id=policy_id, + storage_domain_id=domain_id, + view_params=ViewProtectionGroupParams( + objects=[ViewProtectionGroupObjectParams(id=view_id)] + ), + ) + print("Creating protection job, name '%s'" % payload.name) + resp = client.protection_group.create_protection_group(payload) + job_id = resp.id + # Delete the job. 
+ print("Deleting protection job and snapshots") + resp = client.protection_group.delete_protection_group( + job_id, delete_snapshots=True + ) +except Exception as err: + print(err) diff --git a/samples/helios/vmware_workflow_using_v2.py b/samples/helios/vmware_workflow_using_v2.py new file mode 100644 index 00000000..4145e182 --- /dev/null +++ b/samples/helios/vmware_workflow_using_v2.py @@ -0,0 +1,151 @@ +import time +import random + +from cohesity_sdk.helios_mcm.v2.model.create_or_update_protection_group_request import ( + CreateOrUpdateProtectionGroupRequest, +) +from cohesity_sdk.helios_mcm.v2.model.create_protection_group_run_request import ( + CreateProtectionGroupRunRequest, +) +from cohesity_sdk.helios_mcm.v2.model.create_or_update_protection_group_request import ( + CreateOrUpdateProtectionGroupRequest, +) +from cohesity_sdk.helios_mcm.v2.model.vmware_protection_group_object_params import ( + VmwareProtectionGroupObjectParams, +) +from cohesity_sdk.helios_mcm.v2.model.vmware_protection_group_params import ( + VmwareProtectionGroupParams, +) + +from config import vcenter_name, vcenter_username, vcenter_password +from library import register_vmware_source +from helios_client import helios_v2_connector_with_cluster_id + +# Initialise a client. +client = helios_v2_connector_with_cluster_id() +job_name = "VmwareJob_" + str(random.randint(1, 100)) + + +def get_storage_domain_id(): + domains = client.storage_domain.get_storage_domains().storage_domains or [] + for domain in domains: + if domain.name == "DefaultStorageDomain": + return domain.id + return -1 + + +def get_policy_id(): + policies = client.policy.get_protection_policies().policies or [] + for policy in policies: + if policy.name == "Bronze": + return policy.id + return -1 + + +def create_job(job_name): + try: + source_id = None + jobs = client.protection_group.get_protection_groups(is_deleted=False)[ + "protection_groups" + ] + # Check the job is already available. + for job in jobs: + if job["name"] == job_name: + print("Job %s is already available, skipping creation" % job_name) + return job["id"], job + + # Get storage_domain id. + print("Fetching storage domains") + domain_id = get_storage_domain_id() + if domain_id == -1: + raise Exception("Storage Domain not available.") + + # Check if the source is already registered. + sources = client.source.mcm_get_protection_sources( + environments=["kVMware"] + ).sources + for source in sources: + if source.name == vcenter_name: + source_id = source.id + if not source_id: + register_vmware_source(vcenter_name, vcenter_username, vcenter_password) + body = CreateOrUpdateProtectionGroupRequest( + storage_domain_id=domain_id, + policy_id=get_policy_id(), + name=job_name, + environment="kVMware", + vmware_params=VmwareProtectionGroupParams( + objects=[ + VmwareProtectionGroupObjectParams(id=search_objects(source_id)) + ] + ), + ) + resp = client.protection_group.create_protection_group(body) + return resp + except Exception as err: + print(err) + exit() + + +def search_objects(source_id): + """ + Function to search for Vms available in the vcenter source. + : return object Id. + """ + vm_id = None + vm_size = 99999999999 # 99 GB + vms = client.search.search_objects( + environments=["kVMware"], source_ids=[source_id] + ).objects + for vm in vms: + # Search for a VM of smaller size. + if vm.logical_size_bytes and (vm.logical_size_bytes <= vm_size): + vm_id = vm["object_protection_infos"][0]["object_id"] + return vm_id + + +def run_job(job_id): + # Check for job run and trigger one. 
+    runs = client.protection_group.get_protection_group_runs(job_id)["runs"]
+    if len(runs) == 0 or runs[0]["local_backup_info"]["status"] not in [
+        "Running",
+        "Accepted",
+    ]:
+        # Trigger a job run.
+        body = CreateProtectionGroupRunRequest(run_type="kRegular")
+        client.protection_group.create_protection_group_run(job_id, body)
+
+    # Poll until the run finishes, for at most 15 attempts.
+    count = 15
+    while count != 0:
+        runs = client.protection_group.get_protection_group_runs(job_id)["runs"]
+        count -= 1
+        if not runs:
+            time.sleep(10)
+            continue
+        if runs[0]["local_backup_info"]["status"] in ["Running", "Accepted"]:
+            time.sleep(30)
+        else:
+            return True
+    return False
+
+
+if __name__ == "__main__":
+    print("Creating job")
+    job = create_job(job_name)
+    job_id = job.id
+    print("Job Content", job)
+    print("Updating job with id %s" % job_id)
+    resp = client.protection_group.update_protection_group(job_id, job)
+    print("Trigger job run")
+    run = run_job(job_id)
+    if run:
+        objects = client.search.search_protected_objects(protection_group_ids=[job_id])[
+            "objects"
+        ]
+        snapshot_id = (
+            objects[0].latest_snapshots_info[0].local_snapshot_info["snapshotId"]
+        )
+        print("Latest snapshot id: %s" % snapshot_id)
+
+    # Delete the job along with its snapshots.
+    print("Deleting created job '%s'" % job_name)
+    client.protection_group.delete_protection_group(job_id, delete_snapshots=True)
+    print("Successfully deleted job '%s'" % job_name)
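
Note: each of these scripts imports its connection settings from a config
module that is not included in this patch. The sketch below shows the
module-level constants samples/helios/config.py is assumed to define; every
value is a placeholder for your own environment, not part of the original
change.

# samples/helios/config.py (hypothetical; adjust all values).

# Helios connection details.
helios_ip = "helios.cohesity.com"
helios_api_key = "<helios-api-key>"

# Direct cluster connection, used only to look up the cluster Id.
cluster_vip = "<cluster-vip>"
cluster_username = "<cluster-username>"
cluster_password = "<cluster-password>"
cluster_domain = "LOCAL"

# vCenter details for vmware_workflow_using_v2.py.
vcenter_name = "<vcenter-endpoint>"
vcenter_username = "<vcenter-username>"
vcenter_password = "<vcenter-password>"

# Physical server protected by physical_workflow_using_v2.py.
physical_server = "<physical-server-endpoint>"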