Sample deployment templates


Note


The templates in this appendix are samples for your reference and do not contain real values.


This appendix contains the following topics:

  • Crosswork Network Controller cluster deployment templates for VMware vCenter

  • Crosswork Network Controller cluster deployment templates for KVM

  • Crosswork Network Controller VM templates for single VM deployments

  • Crosswork Data Gateway deployment templates and scripts

  • Sample auto action templates

  • Geo redundancy templates

Crosswork Network Controller cluster deployment templates for VMware vCenter

This topic contains sample manifest templates for various Crosswork cluster deployment scenarios.


Note


If you are using resource pools, note that targeting individual ESXi hosts is not allowed; vCenter assigns the VMs to hosts in the resource pool.


Example 1: To deploy a cluster (3 hybrid nodes, 2 worker nodes) on 2 hosts

The following example deploys a Crosswork cluster with explicitly specified hosts:


# See the end of the file for a configured sample

/********* Crosswork Cluster Data  *********/

  # The name of the Crosswork Cluster.
  ClusterName      = "CW-Cluster-01"

  # Provide the name of the Cw VM image in vcenter, or leave empty
  # When empty, the image name will be populated from the uploaded image
  Cw_VM_Image = ""    # Line added automatically by installer.

  # The IP stack protocol: IPv4 or IPv6 or DUALSTACK
  ClusterIPStack        = "IPv4"

  # The Management Virtual IP for the cluster
  ManagementVIP     = "x.x.x.x"

  # Optional: The Management Virtual IP host-name
  ManagementVIPName = ""

  # The Management IP subnet in dotted decimal format for ipv4 or prefix length for ipv6
  ManagementIPNetmask = "x.x.x.x"

  # The Gateway IP on the Management Network
  ManagementIPGateway = "x.x.x.x"

  # The Data Virtual IP for the cluster. Use 0.0.0.0 or ::0 to disable
  DataVIP           = "x.x.x.x"

  # Optional: The Data Virtual IP host-name
  DataVIPName = ""

  # The Data IP subnet in dotted decimal format for ipv4 or prefix length for ipv6
  # Provide any regular mask when not in use
  DataIPNetmask       = "x.x.x.x"

  # The Gateway IP on the Data Network
  DataIPGateway       = "x.x.x.x"

  #  The IP address of the DNS server
  DNS                 = "x.x.x.x"

  # The domain name to use for the cluster
  DomainName            = "cisco.com"

  # Sets the cw-admin user ssh login password for all VMs in the cluster
  # The password MUST be of min length 8 and strong
  CWPassword            = "**************"

  # Sets the VM size for the cluster. The only supported option is Large.
  VMSize                = "Large"

  # NTP server address or name
  NTP                   = "x.x.x.x"

  # Configuration Manifest schema version
  SchemaVersion         = "7.1.0"

  # Data disk size for Manager/Hybrid nodes in GB. Min 485 Max 8000
  ManagerDataFsSize = 485
  # Data disk size for Worker nodes in GB. Min 485 Max 8000
  WorkerDataFsSize = 485

  // Thin or thick provisioning for all disks. Set to true for thin provisioning, false for thick
  ThinProvisioned = false

  # Log partition size in GB. Min 20 Max 1000
  LogFsSize = 20

  # Minimum percentage of the data disk space to be used for the size of the backup partition
  # Note: The final backup partition size will be calculated dynamically. This parameter defines the minimum.
  # Valid range 1 - 80
  BackupMinPercent = 35

  # Enforces VM profile reservations as "hard"
  EnableHardReservations = "True"

  # FOR DEMO USE ONLY - NOT TO BE USED IN PRODUCTION DEPLOYMENTS
  # Ram disk size in GB
  RamDiskSize           = 10 


/********* Crosswork VM Data Map *********/
# Configure named entries for each Cw VM.
# Number of Hybrid VMs minimum: 3; maximum: 3
# Number of Worker VMs minimum: 2; maximum: 3

CwVMs = {
    # Seed VMs' data.
    # IMPORTANT: A VM with id "0" MUST be present in the initial day0 install manifest and its role MUST be
    # set to either MASTER or HYBRID.
  "0" = {

    # This VM's name
    VMName                = "CW_Node_0",

    # This VMs' management IP address
    ManagementIPAddress = "x.x.x.x",

    # This VMs' data IP address. Use 0.0.0.0 or ::0 to disable
    DataIPAddress       = "x.x.x.x",

    # This Cw VM's type - use "Hybrid" for initial install
    NodeType               = "Hybrid",

    # The state for this VM; 2 = running. Only uncomment when doing a manual inventory import
    #Op_Status = 2
  },

   # Second VMs' data
  "1" = {
    # This VM's name
    VMName                = "CW_Node_1",

    # This VMs' management IP address
    ManagementIPAddress = "x.x.x.x",

    # This VMs' data IP address
    DataIPAddress       = "x.x.x.x",

    # This Cw VM's type - use "Hybrid" for initial install
    NodeType               = "Hybrid",

    # The state for this VM; 2 = running. Only uncomment when doing a manual inventory import
    #Op_Status = 2
  },

  # Third VMs' data
  "2" = {
    # This VM's name
    VMName                = "CW_Node_2",

    # This VMs' management IP address
    ManagementIPAddress = "x.x.x.x",

    # This VMs' data IP address
    DataIPAddress       = "x.x.x.x",

    # This Cw VM's type - use "Hybrid" for initial install
    NodeType               = "Hybrid",

    # The state for this VM; 2 = running. Only uncomment when doing a manual inventory import
    #Op_Status = 2
  },

# Worker VMs' data
  "3" = {
    # This VM's name
    VMName                = "CW_Node_3",

    # This VMs' management IP address
    ManagementIPAddress = "x.x.x.x",

    # This VMs' data IP address
    DataIPAddress       = "x.x.x.x",

    # This Cw VM's type - use "Worker"
    NodeType               = "Worker",

    # The state for this VM; 2 = running. Only uncomment when doing a manual inventory import
    #Op_Status = 2
  },
# Worker VMs' data
  "4" = {
    # This VM's name
    VMName                = "CW_Node_4",

    # This VMs' management IP address
    ManagementIPAddress = "x.x.x.x",

    # This VMs' data IP address
    DataIPAddress       = "x.x.x.x",

    # This Cw VM's type - use "Worker"
    NodeType               = "Worker",

    # The state for this VM; 2 = running. Only uncomment when doing a manual inventory import
    #Op_Status = 2
  },
}

/********* vcenter Resource Data with Cw VM assignment *********/

VCenterDC = {

  # The vcenter IP or host name
  VCenterAddress = "x.x.x.x",

  # The username to use for logging into vcenter
  VCenterUser = "Cisco_User",

  # The vcenter password for the user
  VCenterPassword = "*********",

  # The name of the Data Centre resource to use
  DCname = "Cisco-Crosswork",

  # The name of the vcenter network to attach to the Cw VM Management interface
  # NOTE: Escape any special characters using their URL escape codes, e.g. use "%2f" instead of "/"
  MgmtNetworkName = "VM Network",

  # The name of the vcenter network to attach to the Cw VM Data interface.
  # Leave empty if not used.
  # NOTE: Escape any special characters using their URL escape codes, e.g. use "%2f" instead of "/"
  DataNetworkName = "Crosswork-Internal",

  # The resource folder name on vcenter. Leave empty if not used.
  DCfolder = "",

  # List of the vcenter host resources along with the names of the VMs
  # that each resource will host. Add additional stanzas, separated by a ','
  # for each additional ESXi host or resource
  VMs = [{

    # The ESXi host, or ONLY the vcenter cluster/resource group name.
    Host = "x.x.x.x",

    # The datastore name available to be used by this host or resource group.
    Datastore = "Datastore-1",

    # The high speed datastore available for this host or resource group.
    # Set to same value as Datastore if unsure.
    HSDatastore = "Datastore-1"

    # The ids of the VMs to be hosted by the above ESXi host or resource. These must match the Cw VM
    # ids specified in the Cw VM map. Separate multiple VMs on the given
    # host with a ',', e.g. ["0","1"].
    HostedCwVMs = ["0","1","2"]

    },
    {
    Host = "x.x.x.x"
    Datastore = "Datastore-2"
    HSDatastore = "Datastore-2"
    HostedCwVMs =["3","4"]
    } 
  ]
}
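
If a vcenter network name contains special characters such as "/", enter its URL-escaped form in MgmtNetworkName or DataNetworkName, as noted in the template above. The following is a minimal sketch for generating the escaped form, assuming python3 is available on the machine where you edit the manifest; the network name "Lab/Mgmt Network" is a hypothetical example:

# Print the URL-escaped form of a vcenter network name.
# "Lab/Mgmt Network" is a hypothetical name; the output is "Lab%2FMgmt%20Network".
python3 -c 'import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1], safe=""))' "Lab/Mgmt Network"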

Example 2: To deploy a cluster (3 hybrid, 2 workers) in a Resource Group

The following example deploys a Crosswork cluster using resource groups:



/********* Crosswork Cluster Data  *********/


  ClusterName      = "CW-cluster-01"
  # When empty the image name will be populated from the uploaded image
  Cw_VM_Image = ""    # Line added automatically by installer.
  ClusterIPStack        = "IPv4"
  ManagementVIP     = "x.x.x.x"
  ManagementVIPName = ""
  ManagementIPNetmask = "x.x.x.x"
  ManagementIPGateway = "x.x.x.x"
  DataVIP           = "x.x.x.x"
  DataVIPName = ""
  DataIPNetmask       = "x.x.x.x"
  DataIPGateway       = "x.x.x.x"
  DNS                 = "x.x.x.x"
  DomainName            = "cisco.com"

  # Kubernetes Service Network Customization - The default network '10.96.0.0'.
  # NOTE: The CIDR range is fixed '/16', no need to enter.
  #       Only IPv4 is supported, IPv6 customization is NOT supported.
  K8sServiceNetwork = "10.96.0.0"

  # Kubernetes Pod Network Customization - The default network '10.244.0.0'.
  # NOTE: The CIDR range is fixed '/16', no need to enter.
  #       Only IPv4 is supported, IPv6 customization is NOT supported.
  K8sPodNetwork = "10.244.0.0"


  CWPassword            = "*********"
  VMSize                = "Large"
  NTP                   = "x.x.x.x"
  SchemaVersion         = "7.1.0"

  # Data disk size for Manager/Hybrid nodes in GB. Min 485 Max 8000
  ManagerDataFsSize = 485
  # Data disk size for Worker nodes in GB. Min 485 Max 8000
  WorkerDataFsSize = 485

  // Thin or thick provisioning for all disks. Set to true for thin provisioning, false for thick
  ThinProvisioned = false

  # Log partition size in GB. Min 20 Max 1000
  LogFsSize = 20

  # Minimum percentage of the data disk space to be used for the size of the backup partition
  # Note: The final backup partition size will be calculated dynamically. This parameter defines the minimum.
  # Valid range 1 - 80
  BackupMinPercent = 35

  # Enforces VM profile reservations as "hard"
  EnableHardReservations = "False"

  # FOR DEMO USE ONLY - NOT TO BE USED IN PRODUCTION DEPLOYMENTS
  # Ram disk size in GB
  RamDiskSize           = 0

  # Pods that are marked as skip auto install are not brought up until a dependent application/pod explicitly requests them
  EnableSkipAutoInstallFeature = "False"

  # DEMO/DEV USE ONLY - Enforce pod minimum resource reservations. Default and for production use is True
  EnforcePodReservations = "True"

  # Optional: Provide a standard IANA time zone. Default value is Etc/UTC if not specified
  Timezone = ""

/********* Crosswork VM Data Map *********/
# Configure named entries for each Cw VM.
# Number of Hybrid VMs minimum: 3; maximum: 3
# Number of Worker VMs minimum: 0; maximum: 3

CwVMs = {
  "0" = {
    VMName                = "cw-vm-0",
    ManagementIPAddress = "x.x.x.x",
    DataIPAddress       = "x.x.x.x",
    NodeType               = "Hybrid",
    #Op_Status = 2
  },
  "1" = {
    VMName                = "cw-vm-1",
    ManagementIPAddress = "x.x.x.x",
    DataIPAddress       = "x.x.x.x",
    NodeType               = "Hybrid",
    #Op_Status = 2
  },
  "2" = {
    VMName                = "cw-vm-2",
    ManagementIPAddress = "x.x.x.x",
    DataIPAddress       = "x.x.x.x",
    NodeType               = "Hybrid",
    #Op_Status = 2
  },
  "3" = {
    # This VM's name
    VMName                = "cw-worker-3",
    ManagementIPAddress = "x.x.x.x",
    DataIPAddress       = "x.x.x.x",
    NodeType               = "Worker",

    # The state for this VM; 2 = running. Only uncomment when doing a manual inventory import
    #Op_Status = 2
  },
  "4" = {
    # This VM's name
    VMName                = "cw-worker-4",
    ManagementIPAddress = "x.x.x.x",
    DataIPAddress       = "x.x.x.x",
    NodeType               = "Worker",
    #Op_Status = 2
  }  
}



/********* vcenter Resource Data with Cw VM assignment *********/

VCenterDC = {
  VCenterAddress = "x.x.x.x",
  VCenterUser = "Cisco_User",
  VCenterPassword = "***********",
  DCname = "Cisco-Crosswork",
  MgmtNetworkName = "Management Network",
  DataNetworkName = "Data Network",
  DCfolder = "" 
  VMs = [{
    Host = "{path to resource Group}",                                                                                    
    Datastore = "iSCSI-DataStore",                                                                                        
    HSDatastore = "iSCSI-DataStore",                                                                                      
    HostedCwVMs = ["0","1","2","3","4"],                                                                                                  
    }
  ]
}
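
To estimate the smallest backup partition these values can produce, multiply the data disk size by BackupMinPercent. The installer computes the final partition size dynamically, so this is only a floor; the arithmetic below is a quick shell check using the sample values from the template above:

# 485 GB data disk with BackupMinPercent = 35 gives a minimum of 169 GB.
DATA_FS_GB=485
BACKUP_MIN_PERCENT=35
echo $(( DATA_FS_GB * BACKUP_MIN_PERCENT / 100 ))   # prints 169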

Example 3: To deploy a cluster (3 hybrid nodes, 2 worker nodes) with a dual stack configuration

The following example deploys a Crosswork cluster containing 3 hybrid nodes (IDs 0, 1, 2) and 2 worker nodes (IDs 3, 4) with a dual stack configuration.



/********* Crosswork Cluster Data  *********/

  ClusterName      = "bgl2-az2-geo-dualstack-setup"
  Cw_VM_Image = "cw-na-platform-release-7.1.0-1808-CSCwo72364_pwdfix-250410_dual"    # Line added automatically by installer.
  ClusterIPStack        = "DUALSTACK"
  ManagementVIPv4     = "10.77.84.16"
  ManagementVIPv6     = "2001:420:54ff:13::485:10"
  ManagementIPv4Netmask = "255.255.255.0"
  ManagementIPv4Gateway = "10.77.84.1"
  ManagementIPv6Netmask = "119"
  ManagementIPv6Gateway = "2001:420:54ff:13::485:1"
  DataVIPv6           = "fd00::101:1a"
  DataVIPv4           = "192.168.101.16"
  DataIPv6Netmask       = "121"
  DataIPv6Gateway       = "fd00::101:1"
  DataIPv4Netmask       = "255.255.255.0"
  DataIPv4Gateway       = "192.168.101.1"
  DNSv6                 = "2001:420:284:2004:4:110:122:5058 fe80::c54c:b337:eda5:8bd"
  DNSv4                 = "10.64.88.178"
  DomainName            = "cw.cisco"
  CWPassword            = "*************"
  VMSize                = "Large"
  NTP                   = "ntp.esl.cisco.com"
  SchemaVersion                   = "5.0.0"
  ThinProvisioned = false
  EnableHardReservations = "False"
  Timezone = "Asia/Kolkata"
/********* Crosswork VM Data Map *********/
# Configure named entries for each Cw VM.
# Number of Hybrid VMs minimum: 3; maximum: 3
# Number of Worker VMs minimum: 0; maximum: 3

CwVMs = {
    # Seed VMs' data.
    # NOTE: A VM with id "0" MUST be present in the initial day0 install manifest and its role MUST be
    # set to either MASTER or HYBRID.
  "0" = {
    VMName                = "cw-hybrid1-geo2-az2",
    ManagementIPv6Address = "2001:420:54ff:13::485:11",
    DataIPv6Address       = "fd00::101:1b",
    ManagementIPv4Address = "10.77.84.17",
    DataIPv4Address       = "192.168.101.17",
    NodeType               = "Hybrid"
  },

   # Second VMs' data
  "1" = {
    VMName                = "cw-hybrid2-geo2-az2",
    ManagementIPv6Address = "2001:420:54ff:13::485:12",
    DataIPv6Address       = "fd00::101:1c",
    ManagementIPv4Address = "10.77.84.18",
    DataIPv4Address       = "192.168.101.18",
    NodeType               = "Hybrid"
  },

  "2" = {
    VMName                = "cw-hybrid3-geo2-az2",
    ManagementIPv6Address = "2001:420:54ff:13::485:13",
    DataIPv6Address       = "fd00::101:1d",
    ManagementIPv4Address = "10.77.84.19",
    DataIPv4Address       = "192.168.101.19",
    NodeType               = "Hybrid"
  },

  "3" = {
    VMName                = "cw-worker1-geo2-az2",
    ManagementIPv6Address = "2001:420:54ff:13::485:14",
    DataIPv6Address       = "fd00::101:1e",
    ManagementIPv4Address = "10.77.84.20",
    DataIPv4Address       = "192.168.101.20",
    NodeType               = "Worker"
  },

  "4" = {
    VMName                = "cw-worker2-geo2-az2",
    ManagementIPv6Address = "2001:420:54ff:13::485:15",
    DataIPv6Address       = "fd00::101:1f",
    ManagementIPv4Address = "10.77.84.21",
    DataIPv4Address       = "192.168.101.21",
    NodeType               = "Worker"
  }
 }

/********* vcenter Resource Data with Cw VM assignment *********/

VCenterDC = {

  VCenterAddress = "x.x.x.x",
  VCenterUser = "Cisco_User",
  VCenterPassword = "*********",
  DCname = "Crosswork-LAB",
  MgmtNetworkName = "10.77.84.0",
  DataNetworkName = "DATA-VLAN-101",
  DCfolder = "",
  VMs = [{
    HostedCwVMs = ["0"],
    Host = "10.126.165.52",
    Datastore = "3.3TB-SSD-165-52",
    HSDatastore = "3.3TB-SSD-165-52"
  },
  {
    HostedCwVMs = ["1"],
    Host = "10.126.165.52",
    Datastore = "3.3TB-SSD-165-52",
    HSDatastore = "3.3TB-SSD-165-52"
  },
  {
    HostedCwVMs = ["2"],
    Host = "10.126.165.52",
    Datastore = "3.3TB-SSD-165-52",
    HSDatastore = "3.3TB-SSD-165-52"
  },
  {
    HostedCwVMs = ["3"],
    Host = "10.126.165.52",
    Datastore = "3.3TB-SSD-165-52",
    HSDatastore = "3.3TB-SSD-165-52"
  },
  {
    HostedCwVMs = ["4"],
    Host = "10.126.165.52",
    Datastore = "3.3TB-SSD-165-52",
    HSDatastore = "3.3TB-SSD-165-52"
  }
 ]
}

IgnoreDiagnosticsCheckFailure = "True"    # Line added automatically by installer.

Crosswork Network Controller cluster deployment templates for KVM

Example 1: Deploy Crosswork Network Controller on KVM (cluster deployment)

<Environment
     xmlns="http://schemas.dmtf.org/ovf/environment/1"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
     xmlns:ve="http://www.vmware.com/schema/ovfenv"
     oe:id=""
   <PlatformSection>
      <Kind>KVM</Kind>
      <Version>7.1.0</Version>
      <Vendor>KVM</Vendor>
      <Locale>en</Locale>
   </PlatformSection>
    <PropertySection>
         <Property oe:key="CWPassword" oe:value="**********"/>
         <Property oe:key="CWUsername" oe:value="cw-admin"/>
         <Property oe:key="ClusterCaKey" oe:value=""/>
         <Property oe:key="ClusterCaPubKey" oe:value=""/>
         <Property oe:key="CwInstaller" oe:value="False"/>
         <Property oe:key="DNSv4" oe:value="171.70.168.183"/>
         <Property oe:key="DNSv6" oe:value="::0"/>
         <Property oe:key="DataIPv4Address" oe:value="192.168.5.43"/>
         <Property oe:key="DataIPv4Gateway" oe:value="192.168.5.1"/>
         <Property oe:key="DataIPv4Netmask" oe:value="255.255.255.0"/>
         <Property oe:key="DataIPv6Address" oe:value="::0"/>
         <Property oe:key="DataIPv6Gateway" oe:value="::1"/>
         <Property oe:key="DataIPv6Netmask" oe:value="64"/>
         <Property oe:key="DataPeerIPs" oe:value=""/>
         <Property oe:key="DataVIP" oe:value="192.168.5.42"/>
         <Property oe:key="DataVIPName" oe:value=""/>
         <Property oe:key="Deployment" oe:value="cw_ipv4"/>
         <Property oe:key="Disclaimer" oe:value="Cisco Crosswork"/>
         <Property oe:key="Domain" oe:value="cisco.com"/>
         <Property oe:key="EnableSkipAutoInstallFeature" oe:value="False"/>
         <Property oe:key="EnforcePodReservations" oe:value="True"/>
         <Property oe:key="IgnoreDiagnosticsCheckFailure" oe:value="True"/> 
         <Property oe:key="InitMasterCount" oe:value="3"/>
         <Property oe:key="InitNodeCount" oe:value="5"/>
         <Property oe:key="IsSeed" oe:value="True"/>
         <Property oe:key="K8Orch" oe:value=""/>
         <Property oe:key="K8sPodNetworkV4" oe:value="10.244.0.0"/>
         <Property oe:key="K8sServiceNetworkV4" oe:value="10.96.0.0"/>
         <Property oe:key="ManagementIPv4Address" oe:value="10.19.70.143"/>
         <Property oe:key="ManagementIPv4Gateway" oe:value="10.19.70.1"/>
         <Property oe:key="ManagementIPv4Netmask" oe:value="255.255.255.0"/>
         <Property oe:key="ManagementIPv6Address" oe:value="::0"/>
         <Property oe:key="ManagementIPv6Gateway" oe:value="::1"/>
         <Property oe:key="ManagementIPv6Netmask" oe:value="112"/>
         <Property oe:key="ManagementVIP" oe:value="10.19.70.142"/>
         <Property oe:key="ManagementVIPName" oe:value=""/>
         <Property oe:key="ManagerPeerIPs" oe:value=""/>
         <Property oe:key="NBIIPv4Address" oe:value="0.0.0.0"/>
         <Property oe:key="NBIIPv4Gateway" oe:value="0.0.0.0"/>
         <Property oe:key="NBIIPv4Netmask" oe:value="0.0.0.0"/>
         <Property oe:key="NBIIPv6Address" oe:value="::0"/>
         <Property oe:key="NBIIPv6Gateway" oe:value="::1"/>
         <Property oe:key="NBIIPv6Netmask" oe:value="64"/>
         <Property oe:key="NBIVIP" oe:value="NBI VIP address"/>
         <Property oe:key="NTP" oe:value="ntp.esl.cisco.com"/>
         <Property oe:key="Timezone" oe:value="US/Pacific"/>
         <Property oe:key="VMLocation" oe:value="default"/>
         <Property oe:key="VMType" oe:value="Hybrid"/>
         <Property oe:key="bckup_min_percent" oe:value="35"/>
         <Property oe:key="corefs" oe:value="18"/>
         <Property oe:key="ddatafs" oe:value="485"/>
         <Property oe:key="logfs" oe:value="20"/>
         <Property oe:key="ramdisk" oe:value="0"/>
         <Property oe:key="ssd" oe:value="15"/>
         <Property oe:key="VMSize" oe:value="Large"/>
         <Property oe:key="ThinProvisioned" oe:value="False"/>
         <Property oe:key="UseNonDefaultCalicoBgpPort" oe:value="False"/>
         <Property oe:key="bootOptions.efiSecureBootEnabled" oe:value="True"/>
   </PropertySection>
</Environment>
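
How this environment file reaches the VM depends on your KVM tooling. One common approach, shown here purely as an illustrative sketch (the ISO path, the VM name "cw-node-0", and the CD-ROM transport are assumptions, not the documented procedure), is to package the file as an ISO and attach it to the VM:

# Hypothetical sketch: package ovf-env.xml as an ISO and attach it as a CD-ROM.
genisoimage -output /var/lib/libvirt/images/ovf-env.iso ovf-env.xml
virsh attach-disk cw-node-0 /var/lib/libvirt/images/ovf-env.iso hdc --type cdrom --mode readonly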

Example 2: Deploy Crosswork Network Controller VM on KVM (single VM deployment)

<Environment
     xmlns="http://schemas.dmtf.org/ovf/environment/1"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
     xmlns:oe="http://schemas.dmtf.org/ovf/environment/1"
     xmlns:ve="http://www.vmware.com/schema/ovfenv"
     oe:id=""
   <PlatformSection>
      <Kind>KVM</Kind>
      <Version>7.1.0</Version>
      <Vendor>KVM</Vendor>
      <Locale>en</Locale>
   </PlatformSection>
    <PropertySection>
         <Property oe:key="CWPassword" oe:value="**********"/>
         <Property oe:key="CWUsername" oe:value="cw-admin"/>
         <Property oe:key="ClusterCaKey" oe:value=""/>
         <Property oe:key="ClusterCaPubKey" oe:value=""/>
         <Property oe:key="CwInstaller" oe:value="False"/>
         <Property oe:key="DNSv4" oe:value="171.70.168.183"/>
         <Property oe:key="DNSv6" oe:value="::0"/>
         <Property oe:key="DataIPv4Address" oe:value="192.168.5.48"/>
         <Property oe:key="DataIPv4Gateway" oe:value="192.168.5.1"/>
         <Property oe:key="DataIPv4Netmask" oe:value="255.255.255.0"/>
         <Property oe:key="DataIPv6Address" oe:value="::0"/>
         <Property oe:key="DataIPv6Gateway" oe:value="::1"/>
         <Property oe:key="DataIPv6Netmask" oe:value="64"/>
         <Property oe:key="DataPeerIPs" oe:value=""/>
         <Property oe:key="DataVIP" oe:value="192.168.5.51"/>
         <Property oe:key="DataVIPName" oe:value=""/>
         <Property oe:key="Deployment" oe:value="cw_ipv4"/>
         <Property oe:key="Disclaimer" oe:value="Cisco Crosswork"/>
         <Property oe:key="Domain" oe:value="cisco.com"/>
         <Property oe:key="EnableSkipAutoInstallFeature" oe:value="True"/>
         <Property oe:key="EnforcePodReservations" oe:value="True"/>
         <Property oe:key="IgnoreDiagnosticsCheckFailure" oe:value="True"/> 
         <Property oe:key="InitMasterCount" oe:value="1"/>
         <Property oe:key="InitNodeCount" oe:value="1"/>
         <Property oe:key="IsSeed" oe:value="True"/>
         <Property oe:key="K8Orch" oe:value=""/>
         <Property oe:key="K8sPodNetworkV4" oe:value="10.244.0.0"/>
         <Property oe:key="K8sServiceNetworkV4" oe:value="10.96.0.0"/>
         <Property oe:key="ManagementIPv4Address" oe:value="10.19.70.148"/>
         <Property oe:key="ManagementIPv4Gateway" oe:value="10.19.70.1"/>
         <Property oe:key="ManagementIPv4Netmask" oe:value="255.255.255.0"/>
         <Property oe:key="ManagementIPv6Address" oe:value="::0"/>
         <Property oe:key="ManagementIPv6Gateway" oe:value="::1"/>
         <Property oe:key="ManagementIPv6Netmask" oe:value="112"/>
         <Property oe:key="ManagementVIP" oe:value="10.19.70.151"/>
         <Property oe:key="ManagementVIPName" oe:value=""/>
         <Property oe:key="ManagerPeerIPs" oe:value=""/>
         <Property oe:key="NTP" oe:value="ntp.esl.cisco.com"/>
         <Property oe:key="Timezone" oe:value="US/Pacific"/>
         <Property oe:key="VMLocation" oe:value="default"/>
         <Property oe:key="VMType" oe:value="Hybrid"/>
         <Property oe:key="bckup_min_percent" oe:value="35"/>
         <Property oe:key="corefs" oe:value="18"/>
         <Property oe:key="ddatafs" oe:value="485"/>
         <Property oe:key="logfs" oe:value="20"/>
         <Property oe:key="ramdisk" oe:value="0"/>
         <Property oe:key="ssd" oe:value="15"/>
         <Property oe:key="VMSize" oe:value="XLarge"/>
         <Property oe:key="ThinProvisioned" oe:value="False"/>
         <Property oe:key="UseNonDefaultCalicoBgpPort" oe:value="False"/>
         <Property oe:key="bootOptions.efiSecureBootEnabled" oe:value="True"/>
   </PropertySection>
</Environment>

Crosswork Network Controller VM templates for single VM deployments

Example 1: Dual stack template to deploy using OVF tool (.sh file)

#!/usr/bin/env bash 
SVM_OVA_PATH=$1
Host="10.104.52.105"
VCENTER_LOGIN="admin@vsphere.local@10.104.59.94"
VCENTER_PATH="SVM/host"

/root/user/cwinstall/ovftool/ovftool --acceptAllEulas --skipManifestCheck --X:injectOvfEnv -ds="10.104.52.75-ssd" \
--numberOfCpus:"*"=24 --viCpuResource=:50000: \
--memorySize:"*"=131072 --viMemoryResource=:131072: \
--diskMode="thin" --overwrite --powerOffTarget --powerOn --noSSLVerify \
--allowExtraConfig \
--deploymentOption="cw_dual" \
--prop:"NTP=ntp.esl.cisco.com" \
--prop:"Timezone=Asia/Kolkata" \
--prop:"EnforcePodReservations=True" \
--prop:"EnableSkipAutoInstallFeature=True" \
--prop:"Domain=cisco.com" \
--prop:"Disclaimer=Managed by SVM Infra Test" \
--name="cwi-svm-cnc-adv-test" \
--net:"Data Network=DPortGroup10" \
--net:"Management Network=VM Network" \
--prop:"ManagementVIPName=cwi-svm-cnc-adv-1" \
--prop:"ManagementVIPv4=10.104.52.87" \
--prop:"ManagementVIPv6=2001:420:54ff:24::656:87" \
--prop:"ManagementIPv4Address=10.104.52.88" \
--prop:"ManagementIPv6Address=2001:420:54ff:24::656:88" \
--prop:"ManagementIPv4Netmask=255.255.255.128" \
--prop:"ManagementIPv6Netmask=112" \
--prop:"ManagementIPv4Gateway=10.104.52.1" \
--prop:"ManagementIPv6Gateway=2001:420:54ff:24::656:1" \
--prop:"DataVIPv4=20.1.1.87" \
--prop:"DataVIPv6=fded:1bc1:fc3e:96d0:20:1:1:87" \
--prop:"DataIPv4Address=20.1.1.88" \
--prop:"DataIPv6Address=fded:1bc1:fc3e:96d0:20:1:1:88" \
--prop:"DataIPv4Netmask=255.0.0.0" \
--prop:"DataIPv6Netmask=112" \
--prop:"DataIPv4Gateway=20.1.1.1" \
--prop:"DataIPv6Gateway=fded:1bc1:fc3e:96d0:20:1:1:1" \
--prop:"DNSv4=171.70.168.183" \
--prop:"DNSv6=2001:420:200:1::a" \
--prop:"K8sServiceNetworkV4=10.96.0.0" \
--prop:"K8sPodNetworkV4=10.244.0.0" \
--prop:"CWPassword=*********" \
--prop:"VMType=Hybrid" \
--prop:"IsSeed=True" \
--prop:"InitNodeCount=1" \
--prop:"InitMasterCount=1" \
$SVM_OVA_PATH \
vi://$VCENTER_LOGIN/$VCENTER_PATH/$Host
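
The script reads the OVA path from its first argument (SVM_OVA_PATH=$1). Assuming you save it as deploy_svm.sh (the file name and OVA path below are placeholders), a typical invocation is:

chmod +x deploy_svm.sh
./deploy_svm.sh /path/to/cw-na-cnc-advantage-svm.ova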

Example 2: Dual stack template to deploy using Docker installer (.tfvars file)

Cw_VM_Image = "cw-na-cnc-advantage-svm-7.1.0-673-develop-250422_dual"    # Line added automatically by installer.
ClusterIPStack        = "DUALSTACK"
ManagementVIPName     = "cwi-svm-cnc-adv-1"
ManagementVIPv4       = "10.104.52.87"
ManagementVIPv6       = "2001:420:54ff:24::656:87"
ManagementIPv4Netmask = "255.255.255.128"
ManagementIPv6Netmask = "112"
ManagementIPv4Gateway = "10.104.52.1"
ManagementIPv6Gateway = "2001:420:54ff:24::656:1"
DataVIPv4             = "20.1.1.87"
DataVIPv6             = "fded:1bc1:fc3e:96d0:20:1:1:87"
DataIPv4Netmask       = "255.0.0.0"
DataIPv6Netmask       = "112"
DataIPv4Gateway       = "20.1.1.1"
DataIPv6Gateway       = "fded:1bc1:fc3e:96d0:20:1:1:1"
DNSv4                 = "171.70.168.183"
DNSv6                 = "2001:420:200:1::a"
DomainName            = "cisco.com"
K8sServiceNetwork     = "10.96.0.0"
K8sPodNetwork         = "10.244.0.0"
CWPassword            = "*******"
VMSize                = "XLarge"
NTP                   = "ntp.esl.cisco.com"
Timezone              = "Asia/Kolkata"
EnableSkipAutoInstallFeature = "True"
EnableHardReservations = true
CwVMs = {
    "0" = {
      VMName              = "cwi-svm-cnc-adv-test",
      ManagementIPv4Address = "10.104.52.88",
      ManagementIPv6Address = "2001:420:54ff:24::656:88",
      DataIPv4Address     = "20.1.1.88",
      DataIPv6Address     = "fded:1bc1:fc3e:96d0:20:1:1:88",
      NodeType            = "Hybrid"
    }
}
VCenterDC = {
  VCenterAddress = "10.104.52.94",
  VCenterUser = "abc@vsphere.local",
  VCenterPassword = "********",
  DCname = "SVM",
  MgmtNetworkName = "VM Network",
  DataNetworkName = "DPortGroup10",
  VMs = [
      {
        HostedCwVMs = ["0"],
        Host = "10.104.52.105",
        Datastore = "10.104.52.75-ssd",
        HSDatastore="10.104.52.75-ssd"
      }
  ]
}
SchemaVersion = "7.1.0"
IsRunDiagnosticsScriptForCheck = "True"
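
Because the Docker installer consumes this file as HCL, a malformed .tfvars file (for example, a stray quote) fails only at install time. If the Terraform CLI happens to be available on your workstation (an assumption; it is not required by the installer), you can catch syntax errors earlier:

# Hypothetical pre-check: terraform fmt parses the HCL and exits non-zero on syntax errors.
# "cluster.tfvars" is an assumed file name.
terraform fmt -check cluster.tfvars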

Crosswork Data Gateway deployment templates and scripts

Sample Script for Crosswork Data Gateway Dual-Stack Deployment with Geo-Redundancy

Before running the script, ensure that you are using OVFtool version 4.4.x.

#!/usr/bin/env bash
VM_NAME="<VM name on vcenter>"
DM="<thin/thick>"
DS="<Datastore>"
Vcenter="<vCenter IP address>"
Host="<CDG hostname>"
DC="<Data Center>"
CDGIpv4MgmtIpv4="<CDG management IPv4>"
ManagementIPv4Netmask="<Management network IPv4 mask>"
ManagementIPv4Gateway="<Management IPv4 gateway>"
NorthDataIPv4Address="<Northbound data IPv4>"
NorthDataIPv4Netmask="<Northbound data IPv4 mask>"
NorthDataIPv4Gateway="<Northbound data IPv4 gateway>"
SouthDataIPv4Address="<Southbound data IPv4>"
SouthDataIPv4Netmask="<Southbound data IPv4 mask>"
SouthDataIPv4Gateway="<Southbound data IPv4 gateway>"
CDGIpv6MgmtIpv6="<CDG management IPv6>"
ManagementIPv6Netmask="<Management network IPv6 mask>"
ManagementIPv6Gateway="<Management network IPv6 gateway>"
NorthDataIPv6Address="<Northbound data network IPv6 address>"
NorthDataIPv6Netmask="<Northbound data network IPv6 mask>"
NorthDataIPv6Gateway="<Northbound data network IPv6 gateway>"
SouthDataIPv6Address="<Southbound data network IPv6 address>"
SouthDataIPv6Netmask="<Southbound data network IPv6 mask>"
SouthDataIPv6Gateway="<Southbound data network IPv6 gateway>"
PrivateKey="<Private key>"
Disclaimer="Cisco CDG VM for Crosswork Deployment"
DNSv6="<DNS address>"
NTP="<NTP address>"
Domain="<Domain>"
CtrlerCertChainPwd="<Controller certificate password>"
ClientCertChainPwd="<Client certificate password>"
DgAdminPwd="<Data gateway admin password>"
DgOperPwd="<Data gateway operator password>"
ControllerIP="<Unified FQDN>"
ControllerPassword="<CNC Password>"
ControllerPort="30607"
cdgDomain="<Data gateway domain>"
SouthDataNetwork="<South data network>"
NorthDataNetwork="<North data network>"
CDG_OVA_PATH="<path to the CDG OVA>"

ovftool --version
ovftool --acceptAllEulas --skipManifestCheck --X:injectOvfEnv --overwrite --powerOffTarget --powerOn --noSSLVerify  --allowExtraConfig \
      -ds=$DS \
--deploymentOption="onpremise-standard" \
      --diskMode=$DM \
--prop:"ControllerIP=${ControllerIP}" \
--prop:"ControllerPort=${ControllerPort}" \
      --prop:"ControllerSignCertChain=cw-admin@${CwIpv6Mgmt}:/home/cw-admin/controller.pem" \
      --prop:"ControllerCertChainPwd=${ControllerPassword}" \
      --name="${VM_NAME}" \
      --prop:"Hostname=${cdgDomain}" \
      --prop:"Description=CDG Base VM for Automation" \
      --prop:"Vnic0IPv6Method=Static" \
      --prop:"Vnic0IPv6Address=${CDGIpv6MgmtIpv6}" \
      --prop:"Vnic0IPv6Netmask=${ManagementIPv6Netmask}" \
      --prop:"Vnic0IPv6Gateway=${ManagementIPv6Gateway}" \
      --prop:"Vnic1IPv6Method=Static" \
      --prop:"Vnic1IPv6Address=${NorthDataIPv6Address}" \
      --prop:"Vnic1IPv6Netmask=${NorthDataIPv6Netmask}" \
      --prop:"Vnic1IPv6Gateway=${NorthDataIPv6Gateway}" \
      --prop:"Vnic0IPv4Method=Static" \
      --prop:"Vnic0IPv4Address=${CDGIpv4MgmtIpv4}" \
      --prop:"Vnic0IPv4Netmask=${ManagementIPv4Netmask}" \
      --prop:"Vnic0IPv4Gateway=${ManagementIPv4Gateway}" \
      --prop:"Vnic1IPv4Method=Static" \
      --prop:"Vnic1IPv4Address=${NorthDataIPv4Address}" \
      --prop:"Vnic1IPv4Netmask=${NorthDataIPv4Netmask}" \
      --prop:"Vnic1IPv4Gateway=${NorthDataIPv4Gateway}" \
      --prop:"dg-adminPassword=${DgAdminPwd}" \
      --prop:"dg-operPassword=${DgOperPwd}" \
      --prop:"DNS=${DNSv6}" \
      --net:"vNIC0=DPortGMgmt12" \
      --net:"vNIC1=${NorthDataNetwork}" \
      --net:"vNIC2=${SouthDataNetwork}" \
      --prop:"NTP=${NTP}" \
      --prop:"Domain=${Domain}" \
      $CDG_OVA_PATH \
      vi://Administrator%40vsphere%2Elocal:Crosswork123%21@$Vcenter/$DC/host/$Host

Sample Script for Crosswork Data Gateway Dual-Stack Deployment

Before running the script, ensure that you are using OVFtool version 4.4.x.


Note


The controller IP address parameter can be the controller VIP or the FQDN of the controller VIP.


#!/usr/bin/env bash
VM_NAME="<VM name on vcenter>"
DM="<thin/thick>"
DS="<Datastore>"
Vcenter="<vCenter IP address>"
Host="<CDG hostname>"
DC="<Data Center>"
CDGIpv4MgmtIpv4="<CDG management IPv4>"
ManagementIPv4Netmask="<Management network IPv4 mask>"
ManagementIPv4Gateway="<Management IPv4 gateway>"
NorthDataIPv4Address="<Northbound data IPv4>"
NorthDataIPv4Netmask="<Northbound data IPv4 mask>"
NorthDataIPv4Gateway="<Northbound data IPv4 gateway>"
SouthDataIPv4Address="<Southbound data IPv4>"
SouthDataIPv4Netmask="<Southbound data IPv4 mask>"
SouthDataIPv4Gateway="<Southbound data IPv4 gateway>"
CDGIpv6MgmtIpv6="<CDG management IPv6>"
ManagementIPv6Netmask="<Management network IPv6 mask>"
ManagementIPv6Gateway="<Management network IPv6 gateway>"
NorthDataIPv6Address="<Northbound data network IPv6 address>"
NorthDataIPv6Netmask="<Northbound data network IPv6 mask>"
NorthDataIPv6Gateway="<Northbound data network IPv6 gateway>"
SouthDataIPv6Address="<Southbound data network IPv6 address>"
SouthDataIPv6Netmask="<Southbound data network IPv6 mask>"
SouthDataIPv6Gateway="<Southbound data network IPv6 gateway>"
PrivateKey="<Private key>"
Disclaimer="Cisco CDG VM for Crosswork Deployment"
DNSv6="<DNS address>"
NTP="<NTP address>"
Domain="<Domain>"
CtrlerCertChainPwd="<Controller certificate password>"
ClientCertChainPwd="<Client certificate password>"
DgAdminPwd="<Data gateway admin password>"
DgOperPwd="<Data gateway operator password>"
ControllerIP="<Controller VIP or FQDN of the controller VIP>"
ControllerPassword="<CNC Password>"
ControllerPort="30607"
cdgDomain="<Data gateway domain>"
SouthDataNetwork="<South data network>"
NorthDataNetwork="<North data network>"
CDG_OVA_PATH="<path to the CDG OVA>"

ovftool --version
ovftool --acceptAllEulas --skipManifestCheck --X:injectOvfEnv --overwrite --powerOffTarget --powerOn --noSSLVerify  --allowExtraConfig \
      -ds=$DS \
--deploymentOption="onpremise-standard" \
      --diskMode=$DM \
--prop:"ControllerIP=${ControllerIP}" \
--prop:"ControllerPort=${ControllerPort}" \
      --prop:"ControllerSignCertChain=cw-admin@${CwIpv6Mgmt}:/home/cw-admin/controller.pem" \
      --prop:"ControllerCertChainPwd=${ControllerPassword}" \
      --name="${VM_NAME}" \
      --prop:"Hostname=${cdgDomain}" \
      --prop:"Description=CDG Base VM for Automation" \
      --prop:"Vnic0IPv6Method=Static" \
      --prop:"Vnic0IPv6Address=${CDGIpv6MgmtIpv6}" \
      --prop:"Vnic0IPv6Netmask=${ManagementIPv6Netmask}" \
      --prop:"Vnic0IPv6Gateway=${ManagementIPv6Gateway}" \
      --prop:"Vnic1IPv6Method=Static" \
      --prop:"Vnic1IPv6Address=${NorthDataIPv6Address}" \
      --prop:"Vnic1IPv6Netmask=${NorthDataIPv6Netmask}" \
      --prop:"Vnic1IPv6Gateway=${NorthDataIPv6Gateway}" \
      --prop:"Vnic0IPv4Method=Static" \
      --prop:"Vnic0IPv4Address=${CDGIpv4MgmtIpv4}" \
      --prop:"Vnic0IPv4Netmask=${ManagementIPv4Netmask}" \
      --prop:"Vnic0IPv4Gateway=${ManagementIPv4Gateway}" \
      --prop:"Vnic1IPv4Method=Static" \
      --prop:"Vnic1IPv4Address=${NorthDataIPv4Address}" \
      --prop:"Vnic1IPv4Netmask=${NorthDataIPv4Netmask}" \
      --prop:"Vnic1IPv4Gateway=${NorthDataIPv4Gateway}" \
      --prop:"dg-adminPassword=${DgAdminPwd}" \
      --prop:"dg-operPassword=${DgOperPwd}" \
      --prop:"DNS=${DNSv6}" \
      --net:"vNIC0=DPortGMgmt12" \
      --net:"vNIC1=${NorthDataNetwork}" \
      --net:"vNIC2=${SouthDataNetwork}" \
      --prop:"NTP=${NTP}" \
      --prop:"Domain=${Domain}" \
      $CDG_OVA_PATH \
      vi://Administrator%40vsphere%2Elocal:Crosswork123%21@$Vcenter/$DC/host/$Host

Sample Script for Deploying Crosswork Data Gateway with IPv6 Address

Prerequisites and considerations before running the script

Ensure the following:

  • You are using OVFtool version 4.4.x.

  • You are aware that the values for these rules may vary based on the number of vNICs deployed:

    • NicDefaultGateway

    • NicAdministration

    • NicExternalLogging

    • NicManagement

    • NicControl

    • NicNBExternalData

    • NicSBData

Sample script

#!/usr/bin/env bash
DM="<thin/thick>"
Disclaimer="<Disclaimer>"
DNSv4="<DNS Server>"
NTP="<NTP Server>"
Domain="<Domain>"
Hostname="<CDG hostname>"

VM_NAME="<VM name on vcenter>"
DeploymentOption="<onpremise-standard/onpremise-extended>"
DS="<Datastore>"
Host="<ESXi host>"
ManagementNetwork="<vSwitch/dvSwitch>"
DataNetwork="<vSwitch/dvSwitch>"
DeviceNetwork="<vSwitch/dvSwitch>"
ManagementIPv6Address="<CDG managment IP>"
ManagementIPv6Netmask="<CDG managment mask>"
ManagementIPv6Gateway="<CDG managment gateway>"
DataIPv6Address="<CDG Data network IP>"
DataIPv6Netmask="<CDG Data network mask>"
DataIPv6Gateway="<CDG Data network gateway>"
dgadminpwd="<CDG password for dg-admin user>"
dgoperpwd="<CDG password for dg-oper user>"
ControllerIP="<CNC Management VIP>"
ControllerPassword="<CNC Password>"
ControllerPort="30607"

CDG_OVA_PATH=$1

VCENTER_LOGIN="Administrator%40vsphere.local@<vCenter-IP>"
VCENTER_PATH="<vCenter-DC-NAME>/host"

ovftool --acceptAllEulas --skipManifestCheck --X:injectOvfEnv -ds=$DS --diskMode=$DM --overwrite --powerOffTarget --powerOn --noSSLVerify \
--allowExtraConfig \
--name=$VM_NAME \
--deploymentOption=${DeploymentOption} \
--net:"vNIC0=${ManagementNetwork}" \
--prop:"ControllerIP=${ControllerIP}" \
--prop:"ControllerPort=${ControllerPort}" \
--prop:"ControllerSignCertChain=cw-admin@${ControllerIP}:/home/cw-admin/controller.pem" \
--prop:"ControllerCertChainPwd=${ControllerPassword}" \
--prop:"Hostname=${Hostname}" \
--prop:"Description=${Disclaimer}" \
--prop:"DNS=${DNSv4}" \
--prop:"NTP=${NTP}" \
--prop:"Domain=${Domain}" \
--prop:"Vnic0IPv6Method=Static" \
--prop:"Vnic0IPv6Address=${ManagementIPv6Address}" \
--prop:"Vnic0IPv6Gateway=${ManagementIPv6Gateway}" \
--prop:"Vnic0IPv6Netmask=${ManagementIPv6Netmask}" \
--prop:"NicDefaultGateway=eth0" \
--prop:"NicAdministration=eth0" \
--prop:"NicExternalLogging=eth0" \
--prop:"NicManagement=eth0" \
--prop:"NicControl=eth0" \
--prop:"NicNBExternalData=eth0" \
--prop:"NicSBData=eth0" \
--prop:"dg-adminPassword=${dgadminpwd}" \
--prop:"dg-operPassword=${dgoperpwd}" \
$CDG_OVA_PATH \
vi://$VCENTER_LOGIN/$VCENTER_PATH/$Host

#############################################################
# Append the section below for a two-NIC deployment
#############################################################
#--net:"vNIC1=${DataNetwork}" \
#--prop:"Vnic1IPv6Method=Static" \
#--prop:"Vnic1IPv6Address=${DataIPv6Address}" \
#--prop:"Vnic1IPv6Gateway=${DataIPv6Gateway}" \
#--prop:"Vnic1IPv6Netmask=${DataIPv6Netmask}" \
#--prop:"NicDefaultGateway=eth0" \
#--prop:"NicAdministration=eth0" \
#--prop:"NicExternalLogging=eth0" \
#--prop:"NicManagement=eth0" \
#--prop:"NicControl=eth1" \
#--prop:"NicNBExternalData=eth1" \
#--prop:"NicSBData=eth1" \

#############################################################
# Append the section below for a three-NIC deployment
#############################################################
#--net:"vNIC1=${DataNetwork}" \
--net:"vNIC2=${DeviceNetwork}" \
#--prop:"Vnic1IPv6Method=Static" \
#--prop:"Vnic1IPv6Address=${DataIPv6Address}" \
#--prop:"Vnic1IPv6Gateway=${DataIPv6Gateway}" \
#--prop:"Vnic1IPv6Netmask=${DataIPv6Netmask}" \
#--prop:"NicDefaultGateway=eth0" \
#--prop:"NicAdministration=eth0" \
#--prop:"NicExternalLogging=eth0" \
#--prop:"NicManagement=eth0" \
#--prop:"NicControl=eth1" \
#--prop:"NicNBExternalData=eth1" \
#--prop:"NicSBData=eth2" \

Sample Script for Deploying Crosswork Data Gateway with IPv4 Address

Prerequisites and considerations before running the script

Ensure the following:

  • You are using OVFtool version 4.4.x.

  • You are aware that the values for these rules may vary based on the number of vNICs deployed:

    • NicDefaultGateway

    • NicAdministration

    • NicExternalLogging

    • NicManagement

    • NicControl

    • NicNBExternalData

    • NicSBData

Sample script

#!/usr/bin/env bash
DM="<thin/thick>"
Disclaimer="<Disclaimer>"
DNSv4="<DNS Server>"
NTP="<NTP Server>"
Domain="<Domain>"
Hostname="<CDG hostname>"

VM_NAME="<VM name on vcenter>"
DeploymentOption="<onpremise-standard/onpremise-extended>"
DS="<Datastore>"
Host="<ESXi host>"
ManagementNetwork="<vSwitch/dvSwitch>"
DataNetwork="<vSwitch/dvSwitch>"
DeviceNetwork="<vSwitch/dvSwitch>"
ManagementIPv4Address="<CDG managment IP>"
ManagementIPv4Netmask="<CDG managment mask>"
ManagementIPv4Gateway="<CDG managment gateway>"
DataIPv4Address="<CDG Data network IP>"
DataIPv4Netmask="<CDG Data network mask>"
DataIPv4Gateway="<CDG Data network gateway>"
dgadminpwd="<CDG password for dg-admin user>"
dgoperpwd="<CDG password for dg-oper user>"
ControllerIP="<CNC Management VIP>"
ControllerPassword="<CNC Password>"
ControllerPort="30607"


CDG_OVA_PATH=$1

VCENTER_LOGIN="Administrator%40vsphere.local@<vCenter-IP>"
VCENTER_PATH="<vCenter-DC-NAME>/host"

ovftool --acceptAllEulas --skipManifestCheck --X:injectOvfEnv -ds=$DS --diskMode=$DM --overwrite --powerOffTarget --powerOn --noSSLVerify \
--allowExtraConfig \
--name=$VM_NAME \
--deploymentOption=${DeploymentOption} \
--net:"vNIC0=${ManagementNetwork}" \
--prop:"ControllerIP=${ControllerIP}" \
--prop:"ControllerPort=${ControllerPort}" \
--prop:"ControllerSignCertChain=cw-admin@${ControllerIP}:/home/cw-admin/controller.pem" \
--prop:"ControllerCertChainPwd=${ControllerPassword}" \
--prop:"Hostname=${Hostname}" \
--prop:"Description=${Disclaimer}" \
--prop:"DNS=${DNSv4}" \
--prop:"NTP=${NTP}" \
--prop:"Domain=${Domain}" \
--prop:"Vnic0IPv4Method=Static" \
--prop:"Vnic0IPv4Address=${ManagementIPv4Address}" \
--prop:"Vnic0IPv4Gateway=${ManagementIPv4Gateway}" \
--prop:"Vnic0IPv4Netmask=${ManagementIPv4Netmask}" \
--prop:"NicDefaultGateway=eth0" \
--prop:"NicAdministration=eth0" \
--prop:"NicExternalLogging=eth0" \
--prop:"NicManagement=eth0" \
--prop:"NicControl=eth0" \
--prop:"NicNBExternalData=eth0" \
--prop:"NicSBData=eth0" \
--prop:"dg-adminPassword=${dgadminpwd}" \
--prop:"dg-operPassword=${dgoperpwd}" \
$CDG_OVA_PATH \
vi://$VCENTER_LOGIN/$VCENTER_PATH/$Host

#############################################################
# Append the section below for a two-NIC deployment
#############################################################
#--net:"vNIC1=${DataNetwork}" \
#--prop:"Vnic1IPv4Method=Static" \
#--prop:"Vnic1IPv4Address=${DataIPv4Address}" \
#--prop:"Vnic1IPv4Gateway=${DataIPv4Gateway}" \
#--prop:"Vnic1IPv4Netmask=${DataIPv4Netmask}" \
#--prop:"NicDefaultGateway=eth0" \
#--prop:"NicAdministration=eth0" \
#--prop:"NicExternalLogging=eth0" \
#--prop:"NicManagement=eth0" \
#--prop:"NicControl=eth1" \
#--prop:"NicNBExternalData=eth1" \
#--prop:"NicSBData=eth1" \

#############################################################
# Append the section below for a three-NIC deployment
#############################################################
#--net:"vNIC1=${DataNetwork}" \
--net:"vNIC2=${DeviceNetwork}" \
#--prop:"Vnic1IPv4Method=Static" \
#--prop:"Vnic1IPv4Address=${DataIPv4Address}" \
#--prop:"Vnic1IPv4Gateway=${DataIPv4Gateway}" \
#--prop:"Vnic1IPv4Netmask=${DataIPv4Netmask}" \
#--prop:"NicDefaultGateway=eth0" \
#--prop:"NicAdministration=eth0" \
#--prop:"NicExternalLogging=eth0" \
#--prop:"NicManagement=eth0" \
#--prop:"NicControl=eth1" \
#--prop:"NicNBExternalData=eth1" \
#--prop:"NicSBData=eth2" \

Sample auto action templates

This topic includes examples of auto action templates.


Note


Make sure to replace the placeholder values with actual values relevant to your environment.


Sample auto-action definition (JSON) file

{
    "auto_action": {
        "add_to_repository_requests": [
            {
                "file_location": {
                    "uri_location": {
                        "uri": "https://example.com/path/to/cw-na-cncessential-7.1.0-250530.tar.gz"
                    }
                }
            },
            {
                "file_location": {
                    "uri_location": {
                        "uri": "https://example.com/path/to/cw-na-cncadvantage-7.1.0-250530.tar.gz"
                    }
                }
            },
            {
                "file_location": {
                    "uri_location": {
                        "uri": "https://example.com/path/to/cw-na-cncaddon-7.1.0-250530.tar.gz"
                    }
                }
            }
        ],
        "install_activate_requests": [
            {
                "package_identifier": {
                    "_comment": "Part of essentials capp",
                    "version": "7.1.0",
                    "id": "capp-common-ems-services"
                }
            },
            {
                "package_identifier": {
                    "_comment": "Part of advantage capp",
                    "version": "7.1.0",
                    "id": "capp-cat"
                }
            },
            {
                "package_identifier": {
                    "_comment": "Part of advantage capp",
                    "version": "7.1.0",
                    "id": "capp-coe"
                }
            },
            {
                "package_identifier": {
                    "_comment": "Part of advantage capp",
                    "version": "7.1.0",
                    "id": "capp-aa"
                }
            },
            {
                "package_identifier": {
                    "_comment": "Part of add on capp",
                    "version": "7.1.0",
                    "id": "capp-ca"
                }
            },
            {
                "package_identifier": {
                    "_comment": "Part of add on capp",
                    "version": "7.1.0"",
                    "id": "capp-hi"
                }
            }
        ]
    }
}  
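
Because a single stray character invalidates the whole request file, it is worth checking the JSON syntax before submitting it. One way, assuming python3 is available and the file is saved as auto_action.json (an assumed name), is the standard-library json.tool module, which exits non-zero on a parse error:

python3 -m json.tool auto_action.json > /dev/null && echo "JSON OK"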

Sample auto-action manifest (YAML) file

# auto_action.yaml
# This YAML file configures automated actions to be performed by the orchestrator.
# It includes actions to add files to a repository and install/activate packages.
# Modify the sample file based on the need. Add or remove add_to_repository_requests accordingly.

# Usage Instructions:
# 1. Ensure the file paths and URIs are accessible from your orchestrator environment.
# 2. Supported types for adding files to the repository include:
#    - Local file URIs (e.g., file:///mnt/app_capp/cw-na-collectors-7.xx.xx.tar.gz)
#    - Remote file URIs over HTTP/HTTPS (e.g., https://example.com/sample.tar.gz)
#    - Remote files accessed via SCP (e.g., /path/to/remote/file.tar.gz)
# 3. Pick the add_to_repository_requests type based on the need.
# 4. Either `add_to_repository_requests` or `install_activate_requests` should be present, or both can be included.
# 5. The orchestrator will add the specified files to the repository and then install/activate the listed packages if both are present.
# 6. Double-check the package identifiers and versions for accuracy.
# 7. Ensure `install_activate_requests` is defined in the dependency order. If CAPP2 is dependent on CAPP1, CAPP1 should be defined first.
# 8. Package ID references:
#    - E-CDG: capp-cdg
#    - EMS: capp-common-ems-services
#    - COE: capp-coe
#    - CAT: capp-cat
#    - Service Health: capp-aa
#    - E-NSO: capp-enso
#    - CP-INFRA: capp-cpinfra
#    - CP-Design: capp-design
#    - CP-Collector: capp-collector

auto_action:
  # List of requests to add files to the repository
  add_to_repository_requests:
    - file_location:
        uri_location:
          # Remote file URI to be added to the repository.
          # Example: https://example.com/sample.tar.gz
          uri: <remote_file_uri>
          # Optional: Basic authentication credentials for HTTPS
          # basic_auth:
          #   username: <username>
          #   password: <password>
          # Optional: Skip TLS verification if https
          # skip_tls_verify: <true/false>
    - file_location:
        scp_location:
          # Remote file path to be added to the repository.
          # Example: /path/to/remote/file.tar.gz
          remote_file: <remote_file_path>
          # SSH configuration for remote file access
          ssh_config:
            remote_host: <ssh_host>
            port: <ssh_port>
            username: <ssh_username>
            password: <ssh_password>
    - file_location:
        uri_location:
          # Local file URI to be added to the repository.
          # Example: file:///mnt/app_capp/cw-na-collectors-7.xx.xx.tar.gz
          uri: <local_file_uri_1>
  # List of requests to install and activate packages. Ensure the dependency order of CAPP activation is retained
  install_activate_requests:
    - package_identifier:
        # Identifier of the package to be installed/activated
        # Example: capp-cdg
        id: <package_id_1>
        version: <optional_package_version_1>
    - package_identifier:
        # Identifier of the package to be installed/activated
        # Example: capp-common-ems-services
        id: <package_id_2>
        version: <optional_package_version_2>

# Note:
# - If using SSH for remote file access, replace <ssh_host>, <ssh_port>, <ssh_username>, and <ssh_password> with the appropriate credentials and configuration.
# - If using HTTPS with basic authentication, replace <username> and <password> with the appropriate credentials.

Geo redundancy templates

Sample cross cluster inventory templates

Here are some examples of the cross cluster inventory file (.yaml) that you need to prepare to enable geo redundancy:


Note


  • Cross Cluster inventory supports only IPv6 addresses. If both IPv4 and IPv6 addresses are present, only the IPv6 addresses will be used. If the Cross Cluster inventory is provided with only IPv4 addresses, the system will generate an error.

  • In a geo redundant setup with a dual stack deployment, the inventory file must reference the IPv6 VIP for both clusters instead of the IPv4 VIP, because the unified cross cluster endpoint uses IPv6; all geo-redundancy traffic is routed over IPv6.

  • For details on each parameter, see the sample file downloaded from the Geo Redundancy Manager window.


Day 0: Geo inventory for active cluster deployed with peer cluster (standby)

################################################################################################
#
# Use case:
#   - Geo-inventory yaml to be used when geo-activating the active cluster after standby
#     cluster is fully deployed.
#
# Flag settings:
#   - Set 'is_skip_peer_check_enabled' to false since the peer cluster is already deployed.
#   - Set 'is_post_migration_activation' to false since this is not a migration flow.
#
# Important Notes:
#   - The 'is_skip_peer_check_enabled: true' is required when:
#     - The standby cluster is not deployed before geo-activating the active cluster.
#     - Enabling geo mode on a system post disaster restore after multi cluster failure.
#     - Additionally, if 'is_skip_peer_check_enabled: true', following details are required:
#       - Cluster connectivity
#       - Cluster node's connectivity details
#   - See geo-inventory for the case when standby cluster is not deployed before geo-activating.
#
################################################################################################
---
meta_version: 1.0.0
crosscluster_name: mycnc-geo-cluster
crosscluster_unified_connectivity:
  unified_end_point:
    unified_endpoint_type:
      fqdn_type: {}
    unified_endpoint_implementation: DNS
  data_fqdn:
    domain_name: cw.cisco
    host_name: geodata
  management_fqdn:
    domain_name: cw.cisco
    host_name: geomanagement
clusters:
- cluster_name: cluster-sjc
  cluster_id: fded:1bc1:fc3e:96d0:192:168:5:500
  cluster_type: CROSSWORK_CLUSTER
  initial_preferred_leadership_state: ACTIVE
  connectivity:
    unified_end_point:
      unified_endpoint_type:
        ip_type: {}
      unified_endpoint_implementation: VRRP
    data_vip: fded:1bc1:fc3e:96d0:10:10:10:500
    data_vip_mask: 112
    management_vip: fded:1bc1:fc3e:96d0:192:168:5:500
    management_vip_mask: 112
  site_location:
    location: San Jose
  cluster_credential:
    https_credential:
      username: admin
      password: ********
    ssh_credential:
      username: cw-admin
      password: ********
- cluster_name: cluster-nyc
  cluster_id: fded:1bc1:fc3e:96d0:192:168:6:500
  cluster_type: CROSSWORK_CLUSTER
  initial_preferred_leadership_state: STANDBY
  connectivity:
    unified_end_point:
      unified_endpoint_type:
        ip_type: {}
      unified_endpoint_implementation: VRRP
    data_vip: fded:1bc1:fc3e:96d0:10:10:11:500
    data_vip_mask: 112
    management_vip: fded:1bc1:fc3e:96d0:192:168:6:500
    management_vip_mask: 112
  site_location:
    location: New York
  cluster_credential:
    https_credential:
      username: admin
      password: ********
    ssh_credential:
      username: cw-admin
      password: ********
secret: Your-secret1
is_post_migration_activation: false
is_skip_peer_check_enabled: false
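
With a DNS-based unified endpoint, geo operations depend on the management and data FQDNs resolving to the active cluster's VIPs. A quick resolution check, assuming the dig utility is available and using the sample host names above:

# Verify that the unified endpoint FQDNs resolve to the expected IPv6 VIPs.
dig +short AAAA geomanagement.cw.cisco
dig +short AAAA geodata.cw.cisco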

Day 0: Geo inventory for active cluster deployed with peer clusters (standby and arbiter)

################################################################################################
#
# Use case:
#   - Geo-inventory yaml to be used when geo-activating the active cluster after standby
#     and arbiter clusters are fully deployed.
#
# Flag settings:
#   - Set 'is_skip_peer_check_enabled' to false since both peer clusters are already deployed.
#   - Set 'is_post_migration_activation' to false since this is not a migration flow.
#
# Important Notes:
#   - The 'is_skip_peer_check_enabled: true' is required when:
#     - The standby cluster is not deployed before geo-activating the active cluster.
#     - Enabling geo mode on a system post disaster restore after multi cluster failure.
#     - Additionally, if 'is_skip_peer_check_enabled: true', following details are required:
#       - Cluster connectivity
#       - Cluster node's connectivity details
#   - See geo-inventory for the case when standby cluster is not deployed before geo-activating.
#
################################################################################################
---
meta_version: 1.0.0
crosscluster_name: mycnc-geo-cluster
crosscluster_unified_connectivity:
  unified_end_point:
    unified_endpoint_type:
      fqdn_type: {}
    unified_endpoint_implementation: DNS
  data_fqdn:
    domain_name: cw.cisco
    host_name: geodata
  management_fqdn:
    domain_name: cw.cisco
    host_name: geomanagement
clusters:
- cluster_name: cluster-sjc
  cluster_id: fded:1bc1:fc3e:96d0:192:168:5:500
  cluster_type: CROSSWORK_CLUSTER
  initial_preferred_leadership_state: ACTIVE
  connectivity:
    unified_end_point:
      unified_endpoint_type:
        ip_type: {}
      unified_endpoint_implementation: VRRP
    data_vip: fded:1bc1:fc3e:96d0:10:10:10:500
    data_vip_mask: 112
    management_vip: fded:1bc1:fc3e:96d0:192:168:5:500
    management_vip_mask: 112
  site_location:
    location: San Jose
  cluster_credential:
    https_credential:
      username: admin
      password: ********
    ssh_credential:
      username: cw-admin
      password: ********
- cluster_name: cluster-nyc
  cluster_id: fded:1bc1:fc3e:96d0:192:168:6:500
  cluster_type: CROSSWORK_CLUSTER
  initial_preferred_leadership_state: STANDBY
  connectivity:
    unified_end_point:
      unified_endpoint_type:
        ip_type: {}
      unified_endpoint_implementation: VRRP
    data_vip: fded:1bc1:fc3e:96d0:10:10:11:500
    data_vip_mask: 112
    management_vip: fded:1bc1:fc3e:96d0:192:168:6:500
    management_vip_mask: 112
  site_location:
    location: New York
  cluster_credential:
    https_credential:
      username: admin
      password: ********
    ssh_credential:
      username: cw-admin
      password: ********
- cluster_name: cluster-aus
  cluster_id: fded:1bc1:fc3e:96d0:192:168:5:506
  cluster_type: ARBITER
  initial_preferred_leadership_state: STANDBY
  connectivity:
    unified_end_point:
      unified_endpoint_type:
        ip_type: {}
      unified_endpoint_implementation: VRRP
    data_vip: fded:1bc1:fc3e:96d0:10:10:10:506
    data_vip_mask: 112
    management_vip: fded:1bc1:fc3e:96d0:192:168:5:506
    management_vip_mask: 112
  site_location:
    location: Austin
  cluster_credential:
    https_credential:
      username: admin
      password: ********
    ssh_credential:
      username: cw-admin
      password: ********
secret: Your-secret1
is_post_migration_activation: false
is_skip_peer_check_enabled: false
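
Because the unified endpoints in this sample are DNS FQDNs (geomanagement.cw.cisco and geodata.cw.cisco, assembled from host_name and domain_name above), both names must resolve before geo activation. The following is a minimal pre-check sketch in Python; it is illustrative only and not part of the installer, and the FQDN values are taken from the sample above:

import socket

# FQDNs assembled from host_name + domain_name in the sample inventory above.
UNIFIED_FQDNS = ["geomanagement.cw.cisco", "geodata.cw.cisco"]

for fqdn in UNIFIED_FQDNS:
    try:
        # Collect every address the name resolves to (IPv4 and/or IPv6).
        addrs = {info[4][0] for info in socket.getaddrinfo(fqdn, None)}
        print(f"{fqdn} resolves to: {', '.join(sorted(addrs))}")
    except socket.gaierror as err:
        print(f"WARNING: {fqdn} does not resolve ({err}); fix DNS before geo activation")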

Day N: Geo inventory for active cluster deployed without peer cluster (standby)

################################################################################################
#
# Use case:
#   - Geo-inventory yaml to be used when geo-activating the active cluster before the
#     standby cluster is fully deployed.
#
# Flag settings:
#   - Set 'is_skip_peer_check_enabled' to true since the peer cluster is not deployed.
#   - Set 'is_post_migration_activation' to true.
#
# Important Notes:
#   - Setting 'is_skip_peer_check_enabled: true' is required when:
#     - The standby cluster is not deployed before geo-activating the active cluster.
#     - Geo mode is enabled on a system post disaster restore after a multi-cluster failure.
#   - Additionally, if 'is_skip_peer_check_enabled: true', the following details are required:
#     - Cluster connectivity
#       - See the explicit IPv4/IPv6 VIPs and netmasks under cluster connectivity.
#     - Cluster node connectivity details
#       - See the 'cluster_node_details' object under the cluster that is not deployed.
#
################################################################################################
---
meta_version: 1.0.0
crosscluster_name: mycnc-geo-cluster
crosscluster_unified_connectivity:
  unified_end_point:
    unified_endpoint_type:
      fqdn_type: {}
    unified_endpoint_implementation: DNS
  data_fqdn:
    domain_name: cw.cisco
    host_name: geodata
  management_fqdn:
    domain_name: cw.cisco
    host_name: geomanagement
clusters:
- cluster_name: cluster-sjc
  cluster_id: fded:1bc1:fc3e:96d0:192:168:5:500
  cluster_type: CROSSWORK_CLUSTER
  initial_preferred_leadership_state: ACTIVE
  connectivity:
    unified_end_point:
      unified_endpoint_type:
        ip_type: {}
      unified_endpoint_implementation: VRRP
    data_vip: fded:1bc1:fc3e:96d0:10:10:10:500
    data_vip_mask: 112
    management_vip: fded:1bc1:fc3e:96d0:192:168:5:500
    management_vip_mask: 112
    data_vip_ipv4: 10.10.10.50
    data_vip_ipv4_mask: 24
    management_vip_ipv4: 192.168.5.50
    management_vip_ipv4_mask: 24
    data_vip_ipv6: fded:1bc1:fc3e:96d0:10:10:10:500
    data_vip_ipv6_mask: 112
    management_vip_ipv6: fded:1bc1:fc3e:96d0:192:168:5:500
    management_vip_ipv6_mask: 112
  site_location:
    location: San Jose
  cluster_credential:
    https_credential:
      username: admin
      password: ********
    ssh_credential:
      username: cw-admin
      password: ********
- cluster_name: cluster-nyc
  cluster_id: fded:1bc1:fc3e:96d0:192:168:6:500
  cluster_type: CROSSWORK_CLUSTER
  initial_preferred_leadership_state: STANDBY
  connectivity:
    unified_end_point:
      unified_endpoint_type:
        ip_type: {}
      unified_endpoint_implementation: VRRP
    data_vip: fded:1bc1:fc3e:96d0:10:10:11:500
    data_vip_mask: 112
    management_vip: fded:1bc1:fc3e:96d0:192:168:6:500
    management_vip_mask: 112
    data_vip_ipv4: 10.10.11.50
    data_vip_ipv4_mask: 24
    management_vip_ipv4: 192.168.6.50
    management_vip_ipv4_mask: 24
    data_vip_ipv6: fded:1bc1:fc3e:96d0:10:10:11:500
    data_vip_ipv6_mask: 112
    management_vip_ipv6: fded:1bc1:fc3e:96d0:192:168:6:500
    management_vip_ipv6_mask: 112
  site_location:
    location: New York
  cluster_credential:
    https_credential:
      username: admin
      password: ********
    ssh_credential:
      username: cw-admin
      password: ********
  cluster_node_details:
    - unified_end_point:
        unified_endpoint_type:
          ip_type: {}
        unified_endpoint_implementation: VRRP
      data_ipv4: 10.10.11.51
      data_ipv4_mask: 24
      management_ipv4: 192.168.6.51
      management_ipv4_mask: 24
      data_ipv6: fded:1bc1:fc3e:96d0:10:10:11:501
      data_ipv6_mask: 112
      management_ipv6: fded:1bc1:fc3e:96d0:192:168:6:501
      management_ipv6_mask: 112
    - unified_end_point:
        unified_endpoint_type:
          ip_type: {}
        unified_endpoint_implementation: VRRP
      data_ipv4: 10.10.11.52
      data_ipv4_mask: 24
      management_ipv4: 192.168.6.52
      management_ipv4_mask: 24
      data_ipv6: fded:1bc1:fc3e:96d0:10:10:11:502
      data_ipv6_mask: 112
      management_ipv6: fded:1bc1:fc3e:96d0:192:168:6:502
      management_ipv6_mask: 112
    - unified_end_point:
        unified_endpoint_type:
          ip_type: {}
        unified_endpoint_implementation: VRRP
      data_ipv4: 10.10.11.53
      data_ipv4_mask: 24
      management_ipv4: 192.168.6.53
      management_ipv4_mask: 24
      data_ipv6: fded:1bc1:fc3e:96d0:10:10:11:503
      data_ipv6_mask: 112
      management_ipv6: fded:1bc1:fc3e:96d0:192:168:6:503
      management_ipv6_mask: 112
secret: Your-secret1
is_post_migration_activation: true
is_skip_peer_check_enabled: true
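
As the notes above state, when is_skip_peer_check_enabled is true the inventory must carry explicit VIPs and netmasks under each cluster's connectivity, plus a cluster_node_details block for the not-yet-deployed standby. The following Python sketch (illustrative only; it assumes PyYAML is available and uses the field names from the sample above) flags missing entries before the file is submitted:

import yaml  # PyYAML, assumed available

def check_skip_peer_inventory(path):
    """Flag details that are mandatory when is_skip_peer_check_enabled is true."""
    with open(path) as f:
        inv = yaml.safe_load(f)
    problems = []
    if not inv.get("is_skip_peer_check_enabled"):
        return problems  # peers are deployed; the installer validates them itself
    for cluster in inv.get("clusters", []):
        name = cluster.get("cluster_name", "<unnamed>")
        conn = cluster.get("connectivity", {})
        # Explicit VIPs and netmasks are required under cluster connectivity.
        for key in ("data_vip", "data_vip_mask",
                    "management_vip", "management_vip_mask"):
            if key not in conn:
                problems.append(f"{name}: missing connectivity.{key}")
        # The not-yet-deployed standby cluster also needs per-node details.
        if (cluster.get("cluster_type") == "CROSSWORK_CLUSTER"
                and cluster.get("initial_preferred_leadership_state") == "STANDBY"
                and not cluster.get("cluster_node_details")):
            problems.append(f"{name}: missing cluster_node_details")
    return problems

for issue in check_skip_peer_inventory("geo-inventory.yaml"):
    print("WARNING:", issue)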

Day N: Geo inventory for active cluster deployed without arbiter VM

  • This is the day-N arbiter deployment of the geo HA setup, i.e., the arbiter cluster is deployed after the active and standby clusters.

  • The inventory is the same as the one used in Day 0: Geo inventory for active cluster deployed with peer clusters (standby and arbiter), but you must set the is_post_migration_activation flag to true (see the sketch after this list).

  • You must enable geo redundancy on the active and standby clusters with the "re-import" flow, followed by normal geo activation on the arbiter VM.
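
The following minimal Python sketch shows one way to derive the day-N arbiter inventory from the Day 0 file by flipping only that flag. It is illustrative, not product tooling; the file names are hypothetical, it assumes PyYAML, and note that safe_dump does not preserve the comments in the original file:

import yaml  # PyYAML, assumed available

# Load the Day 0 inventory (hypothetical file name).
with open("geo-inventory-day0.yaml") as f:
    inv = yaml.safe_load(f)

# Flip the one flag that differs for the day-N arbiter flow; both peer
# clusters are already deployed, so the peer check stays enabled.
inv["is_post_migration_activation"] = True
inv["is_skip_peer_check_enabled"] = False

# Write the day-N copy without reordering the keys.
with open("geo-inventory-dayn-arbiter.yaml", "w") as f:
    yaml.safe_dump(inv, f, sort_keys=False)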

Sample multi-cluster upgrade inventory template

Here is an example of the inventory file (.yaml) that you need to prepare for a multi-cluster upgrade:

################################################################################################
#
# Use case:
#   - Geo-inventory yaml to be used when performing a geo upgrade from site-2.
#
################################################################################################
 
## Meta version of the yaml ###
meta_version: 1.0.0
## Cross-cluster name, mutable
crosscluster_name: mycnc-geo-cluster
### Unified endpoint of the multi cluster for the management and data endpoints across clusters
crosscluster_unified_connectivity:
  unified_end_point:
    unified_endpoint_type:
      #### fqdn_type and ip_type are options; only fqdn_type is supported.
      fqdn_type: {}
    #### DNS, BGP, and NLB are options; only DNS is supported for now.
    unified_endpoint_implementation: DNS
  ### The below is needed if fqdn_type is chosen; otherwise data_vip/mgmt_vip could be used for the ip_type endpoint type.
  management_fqdn:
    ## CNC domain zone; the DNS server is checked for resolution
    domain_name: your-name.domain
    host_name: your-unified-cnc-mgmt-hostname
  data_fqdn:
    ## CNC domain zone name; the DNS server is checked for resolution
    domain_name: your-name.domain
    host_name: your-unified-cnc-data-hostname
  
### Constituent clusters ####
clusters:
  ##### Mutable cluster name
  - cluster_name: cluster-sjc
    ## Fill cluster_id with the cluster Management VIP
    cluster_id: fded:1bca:fc3f:96d0:192:168:5:101
    connectivity:
      ### Intra-cluster (within a cluster) unified endpoint ###
      ### Endpoint type is ip_type or fqdn_type; implementation could be VRRP, NLB, or BGP.
      unified_end_point:
        unified_endpoint_type:
          ip_type: {}
        #### VRRP, BGP, and NLB are options; only VRRP with ip_type is supported for now on-prem.
        ### For cloud, NLB with fqdn_type could be used.
        unified_endpoint_implementation: VRRP
      ### The below is needed if ip_type is chosen; otherwise data_fqdn/mgmt_fqdn could be used for the fqdn_type endpoint type.
      ## Your intra-cluster data vip
      data_vip: 10.10.10.11
      ## data vip subnet mask
      data_vip_mask: 0
      ## Your intra-cluster management vip
      management_vip: 20.20.20.11
      ## management vip subnet mask
      management_vip_mask: 0
      ## The management and data fqdn are applicable only for the unified crosscluster instance.
    ## STANDBY or ACTIVE for the leadership state
    initial_preferred_leadership_state: ACTIVE
    ### DC location; needs to be unique per cluster. For cloud, region-az could be used.
    site_location:
      location: San Jose
    # Mutable credentials
    cluster_credential:
      ## This is the https credential post first-time cluster login
      https_credential:
        username: admin
        #### pwd/secrets are within single quotes if special chars are used
        password: your-password
      ssh_credential:
        username: admin
        #### pwd/secrets are within single quotes if special chars are used
        password: your-password
  ##### Mutable cluster name
  - cluster_name: cluster-nyc
    ## Fill cluster_id with the cluster Management VIP
    cluster_id: fded:1bca:fc3f:96d0:192:168:6:101
    ## STANDBY or ACTIVE for the leadership state
    initial_preferred_leadership_state: STANDBY
    connectivity:
      ### Intra-cluster (within a cluster) unified endpoint ###
      ### Endpoint type is ip_type or fqdn_type; implementation could be VRRP, NLB, or BGP.
      unified_end_point:
        unified_endpoint_type:
          ip_type: {}
        #### VRRP, BGP, and NLB are options; only VRRP with ip_type is supported for now on-prem.
        ### For cloud, NLB with fqdn_type could be used.
        unified_endpoint_implementation: VRRP
      ### The below is needed if ip_type is chosen; otherwise data_fqdn/mgmt_fqdn could be used for the fqdn_type endpoint type.
      ## Your intra-cluster data vip
      data_vip: 30.30.30.11
      ## data vip subnet mask
      data_vip_mask: 0
      ## Your intra-cluster management vip
      management_vip: 40.40.40.11
      ## management vip subnet mask
      management_vip_mask: 0
      ## The management and data fqdn are applicable only for the unified crosscluster instance.
    ### DC location; needs to be unique per cluster. For cloud, region-az could be used.
    site_location:
      location: New York City
    # Mutable credentials
    cluster_credential:
      ## This is the https credential post first-time cluster login
      https_credential:
        username: admin
        #### pwd/secrets are within single quotes if special chars are used
        password: your-password
      ssh_credential:
        username: admin
        #### pwd/secrets are within single quotes if special chars are used
        password: your-password
#### Mutable secret (within single quotes if special chars are used); used to kick-start inter-cluster mTLS.
### Needs to be >= 10 chars with at least 1 special, 1 uppercase, and 1 numerical character.
secret: Your-secret1
### Set this to true if you are enabling geo mode on a system post migration setup,
## rather than on a fresh first-time install.
is_post_migration_activation: false
### Set this to true if you want to skip the peer check, e.g., when enabling geo mode on a
## system post disaster restore after a multi-cluster failure.
is_skip_peer_check_enabled: false
# Disable the NSO backup from the peer; set to true if NSO is not deployed, else false.
disable_nso_backup_from_peer: true
# Backup job timeout.
# Valid values, for example: "3h", "4h", "5h30m40s", "100m".
# The system default is "3h".
# If you set a timeout of less than "3h", it is reset to the default "3h" internally.
# If the cluster is deployed on a test bed with all apps and little data, the default timeout is OK.
# For a scale setup or a setup with a large amount of data, size the timeout at roughly
# 3 hours per 100 GB of postgres/timescale data:
# ~3h for 100 GB, ~9h for 300 GB, ~30h for 1 TB, and so on.
backup_job_poll_timout: "3h"
# Migration job timeout.
# Valid values, for example: "3h", "4h", "5h30m40s", "100m".
# The system default is "3h".
# If you set a timeout of less than "3h", it is reset to the default "3h" internally.
# Size it using the same rule of thumb as the backup job timeout above (~3h per 100 GB
# of postgres/timescale data).
migration_job_poll_timout: "3h"
# Storage settings for migration
storage_settings:
  ## scp host setting type
  scp_host:
    ## remote location
    remote_location: "/mnt/robot_datafs/public/l1"
    ## ssh configuration
    ssh_config:
      ## ssh configuration host ip
      remote_host: 172.20.80.84
      ## ssh configuration username
      username: cw-admin
      ## ssh configuration password
      password: your-password
      ## ssh port
      port: 22
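
If you need to size backup_job_poll_timout and migration_job_poll_timout for a large dataset, the comments above give a rule of thumb of roughly 3 hours per 100 GB of postgres/timescale data, with "3h" as the enforced minimum. The following short Python helper (our own illustration; the function name and rounding are not part of the product) turns a data size into a duration string in the format the template expects:

import math

def poll_timeout(data_gb, hours_per_100gb=3.0, floor_hours=3):
    """Return a duration string such as "9h", using the ~3h-per-100GB
    rule of thumb above; "3h" is the system minimum."""
    hours = math.ceil(data_gb / 100.0 * hours_per_100gb)
    return f"{max(hours, floor_hours)}h"

print(poll_timeout(100))   # 3h  (at the default/minimum)
print(poll_timeout(300))   # 9h
print(poll_timeout(1000))  # 30h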