#!/usr/bin/env python3
import json
import sys
import argparse
import time
from datetime import datetime

from rsc import RSCAuth, RSCGraphQL


def parse_options_file(options_file):
    """Parse the options file containing Oracle parameters"""
    options = []
    try:
        with open(options_file, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                # Skip empty lines and comments
                if not line or line.startswith('#'):
                    continue
                # Parse key=value pairs
                if '=' in line:
                    key, value = line.split('=', 1)
                    key = key.strip()
                    value = value.strip()
                    # Handle quoted values
                    if value.startswith("'") and value.endswith("'"):
                        # Keep the quotes for Oracle parameters
                        pass
                    else:
                        # Remove any stray single quotes
                        value = value.replace("'", "")
                    options.append({
                        "key": key,
                        "value": value
                    })
    except FileNotFoundError:
        print(f"ERROR: Options file '{options_file}' does not exist.", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"ERROR: Failed to parse options file: {e}", file=sys.stderr)
        sys.exit(1)
    return options
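
# Illustration (not executed), derived from the parsing rules above: given the
# sample options file shown in the --help epilog, parse_options_file() yields
# key/value pairs like these, with fully quoted values passed through verbatim
# and unquoted values stripped of any stray single quotes:
#
#   DB_FILE_NAME_CONVERT='SCLONE','NEWDB'
#       -> {"key": "DB_FILE_NAME_CONVERT", "value": "'SCLONE','NEWDB'"}
#   DB_CREATE_FILE_DEST=/u01/app/oracle/oradata/NEWDB/
#       -> {"key": "DB_CREATE_FILE_DEST", "value": "/u01/app/oracle/oradata/NEWDB/"}
#
# The resulting list is later sent unchanged as the mutation's
# advancedRecoveryConfigMap (see execute_operation below).
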
def find_database_by_name_or_id(identifier, source_host=None):
    """Find database by name or ID and return its details"""
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    # Check if identifier looks like a UUID (contains hyphens)
    if '-' in identifier:
        # It's likely a database ID
        query = """
        query OracleDatabase($fid: UUID!) {
          oracleDatabase(fid: $fid) {
            dbUniqueName
            id
            cluster {
              id
              name
            }
            logicalPath {
              fid
              name
              objectType
            }
          }
        }
        """
        variables = {"fid": identifier}
    else:
        # It's a database name
        filter_conditions = [
            {"texts": [identifier], "field": "NAME_EXACT_MATCH"},
            {"texts": ["false"], "field": "IS_RELIC"},
            {"texts": ["false"], "field": "IS_REPLICATED"}
        ]
        if source_host:
            # Add host filter if source host specified
            filter_conditions.append({"texts": [source_host], "field": "HOST_NAME"})
        query = """
        query OracleDatabases($filter: [Filter!]) {
          oracleDatabases(filter: $filter) {
            nodes {
              dbUniqueName
              id
              cluster {
                id
                name
              }
              logicalPath {
                fid
                name
                objectType
              }
            }
          }
        }
        """
        variables = {"filter": filter_conditions}

    response = gql.query(query, variables)

    if '-' in identifier:
        # Direct ID lookup
        db = response['data']['oracleDatabase']
        if not db:
            raise ValueError(f"Database with ID '{identifier}' not found")
        return db
    else:
        # Name lookup
        databases = response['data']['oracleDatabases']['nodes']
        if not databases:
            raise ValueError(f"No databases found with name '{identifier}'")
        if source_host:
            # Filter by source host if specified
            filtered_dbs = [
                db for db in databases
                if any(path.get('name') == source_host for path in db.get('logicalPath', []))
            ]
            if not filtered_dbs:
                raise ValueError(
                    f"No databases found with name '{identifier}' on host '{source_host}'")
            if len(filtered_dbs) > 1:
                print(f"Multiple databases found with name '{identifier}' on host '{source_host}':",
                      file=sys.stderr)
                for db in filtered_dbs:
                    print(f"  - {db['dbUniqueName']} (ID: {db['id']})", file=sys.stderr)
                raise ValueError("Please specify the database ID instead")
            return filtered_dbs[0]
        else:
            if len(databases) > 1:
                print(f"Multiple databases found with name '{identifier}':", file=sys.stderr)
                for db in databases:
                    host_name = db['logicalPath'][0]['name'] if db['logicalPath'] else 'Unknown'
                    print(f"  - {db['dbUniqueName']} (ID: {db['id']}, Host: {host_name})",
                          file=sys.stderr)
                raise ValueError("Please specify the database ID instead")
            return databases[0]


def get_latest_pit(db_id):
    """Get the latest Point in Time from recoverable ranges"""
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    query = """
    query OracleDatabaseRecoverableRangesQuery($fid: String!) {
      oracleRecoverableRanges(
        input: {id: $fid, shouldIncludeDbSnapshotSummaries: false}
      ) {
        data {
          beginTime
          endTime
          __typename
        }
        __typename
      }
      oracleMissedRecoverableRanges(input: {id: $fid}) {
        data {
          beginTime
          endTime
          __typename
        }
        __typename
      }
    }
    """
    variables = {"fid": db_id}
    response = gql.query(query, variables)

    # Get latest endTime from recoverable ranges
    ranges = response['data']['oracleRecoverableRanges']['data']
    if ranges:
        latest_endtime = max(range_item['endTime'] for range_item in ranges)
        print(f"INFO: Latest PIT (ISO8601): {latest_endtime}")
        # Convert to datetime and then to milliseconds since epoch
        dt = datetime.fromisoformat(latest_endtime.replace('Z', '+00:00'))
        unixtime_ms = int(dt.timestamp() * 1000)
        print(f"INFO: Latest PIT unixtime (ms): {unixtime_ms}")
        return unixtime_ms
    else:
        raise ValueError("No recoverable ranges found for database")


def get_oracle_host_id(host_name, cluster_id):
    """Get Oracle host ID by name and cluster"""
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    query = """
    query OracleHosts($filter: [Filter!]) {
      oracleTopLevelDescendants(filter: $filter) {
        nodes {
          name
          id
        }
      }
    }
    """
    variables = {
        "filter": [
            {"texts": [host_name], "field": "NAME"},
            {"texts": [cluster_id], "field": "CLUSTER_ID"}
        ]
    }
    response = gql.query(query, variables)

    hosts = response['data']['oracleTopLevelDescendants']['nodes']
    if not hosts:
        raise ValueError(f"Host '{host_name}' not found in cluster")
    if len(hosts) > 1:
        print(f"WARN: Multiple hosts found for '{host_name}':", file=sys.stderr)
        for host in hosts:
            print(f"  - {host['name']} (ID: {host['id']})", file=sys.stderr)
        # Use the first one
        print(f"WARN: Using first match: {hosts[0]['name']}", file=sys.stderr)
    return hosts[0]['id']


def execute_operation(db_id, target_host_id, recovery_timestamp_ms, operation_name,
                      options, num_channels=None, custom_pfile_path=None, is_live_mount=False):
    """Execute the database clone or live mount operation"""
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    # Build the config object
    config = {
        "targetOracleHostOrRacId": target_host_id,
        "recoveryPoint": {
            "timestampMs": recovery_timestamp_ms
        },
        "shouldAllowRenameToSource": True,
        "shouldSkipDropDbInUndo": False
    }
    if is_live_mount:
        config["shouldMountFilesOnly"] = False
        config["lmDbName"] = operation_name
    else:
        config["shouldRestoreFilesOnly"] = False
        config["cloneDbName"] = operation_name
    if num_channels is not None:
        config["numChannels"] = num_channels
    if custom_pfile_path is not None:
        config["customPfilePath"] = custom_pfile_path

    variables = {
        "input": {
            "request": {
                "id": db_id,
                "config": config
            },
            "advancedRecoveryConfigMap": options
        }
    }

    if is_live_mount:
        query = """
        mutation OracleDatabaseMountMutation($input: MountOracleDatabaseInput!) {
          mountOracleDatabase(input: $input) {
            id
            links {
              href
              rel
              __typename
            }
            __typename
          }
        }
        """
        response = gql.query(query, variables)
        return response['data']['mountOracleDatabase']['id']
    else:
        query = """
        mutation OracleDatabaseExportMutation($input: ExportOracleDatabaseInput!) {
          exportOracleDatabase(input: $input) {
            id
            links {
              href
              rel
              __typename
            }
            __typename
          }
        }
        """
        response = gql.query(query, variables)
        return response['data']['exportOracleDatabase']['id']
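
# Illustration (not executed): for a clone, execute_operation() builds mutation
# variables roughly like the JSON below once serialized; a live mount swaps
# cloneDbName for lmDbName and shouldRestoreFilesOnly for shouldMountFilesOnly.
# All values shown are placeholders, not real identifiers.
#
#   {
#     "input": {
#       "request": {
#         "id": "<source-database-fid>",
#         "config": {
#           "targetOracleHostOrRacId": "<target-host-id>",
#           "recoveryPoint": {"timestampMs": 1731596400000},
#           "shouldAllowRenameToSource": true,
#           "shouldSkipDropDbInUndo": false,
#           "shouldRestoreFilesOnly": false,
#           "cloneDbName": "NEWDB"
#         }
#       },
#       "advancedRecoveryConfigMap": [{"key": "...", "value": "..."}]
#     }
#   }
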
def monitor_job_status(job_id, cluster_id):
    """Monitor the clone or live mount job status until completion"""
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    query = """
    query OracleDatabaseAsyncRequestDetails($input: GetOracleAsyncRequestStatusInput!) {
      oracleDatabaseAsyncRequestDetails(input: $input) {
        id
        nodeId
        status
        startTime
        endTime
        progress
        error {
          message
        }
      }
    }
    """
    variables = {
        "input": {
            "id": job_id,
            "clusterUuid": cluster_id
        }
    }

    while True:
        response = gql.query(query, variables)
        details = response['data']['oracleDatabaseAsyncRequestDetails']
        status = details['status']
        progress = details.get('progress', 0)
        print(f"INFO: Job status: {status} ({progress}%)")

        if status == "FAILED":
            error_msg = (details.get('error') or {}).get('message', 'Unknown error')
            print(f"ERROR: Database job FAILED: {error_msg}", file=sys.stderr)
            print(json.dumps(response, indent=2))
            sys.exit(2)
        elif status == "CANCELLED":
            print("WARN: Database job CANCELLED")
            sys.exit(3)
        elif status == "SUCCEEDED":
            print("INFO: Database job SUCCEEDED")
            print(json.dumps(response, indent=2))
            return

        time.sleep(15)


def main():
    parser = argparse.ArgumentParser(
        description="Clone or live mount an Oracle database using Rubrik Security Cloud",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        add_help=False,  # Disable the default -h/--help so --help can be added explicitly below
        epilog="""
Examples:
  python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host SCLONE
  python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host --livemount SCLONE
  python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -s source-host SCLONE
  python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -t "2025-11-14 15:30:00" SCLONE
  python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host --dryrun SCLONE
  python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -c 4 SCLONE
  python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -p /path/to/pfile SCLONE

Options file format (options.txt):
  CONTROL_FILES='/u01/app/oracle/oradata/NEWDB/control01.ctl, /u01/app/oracle/fast_recovery_area/NEWDB/control02.ctl'
  DB_FILE_NAME_CONVERT='SCLONE','NEWDB'
  DB_CREATE_FILE_DEST=/u01/app/oracle/oradata/NEWDB/
  AUDIT_FILE_DEST='/u01/app/oracle/admin/NEWDB/adump'
"""
    )
    parser.add_argument("-n", "--newname", required=True,
                        help="Database name/SID of the new cloned or live mounted database")
    parser.add_argument("-o", "--optionsfile", required=True,
                        help="Path to the options file containing advanced cloning options")
    parser.add_argument("--targethost", required=True,
                        help="Target host where the cloned database will be created")
    parser.add_argument("-s", "--sourcehost",
                        help="Source host where the original database is located (optional)")
    parser.add_argument("-t", "--timestamp",
                        help="Optional timestamp for the recovery point in format 'YYYY-MM-DD HH:MM:SS'")
    parser.add_argument("-d", "--dryrun", action="store_true",
                        help="Dry-run mode - show mutation variables without executing the clone")
    parser.add_argument("--livemount", action="store_true",
                        help="Create a live mount instead of a clone")
    parser.add_argument("-c", "--channels", type=int,
                        help="Optional number of RMAN channels to configure for the clone")
    parser.add_argument("-p", "--pfile",
                        help="Optional custom pfile path for the clone")
    parser.add_argument("--help", action="help", default=argparse.SUPPRESS,
                        help="Show this help message and exit")
    parser.add_argument("srcdb", help="Source database name or RSC database ID")

    args = parser.parse_args()

    # Validate arguments
    if args.channels is not None and args.channels <= 0:
        print("ERROR: -c requires a positive integer value", file=sys.stderr)
        sys.exit(1)
    if args.pfile and not args.pfile.startswith('/'):
        print("ERROR: -p requires an absolute path (starting with /)", file=sys.stderr)
        sys.exit(1)
    try:
        # Parse options file
        print(f"INFO: Parsing options file: {args.optionsfile}")
        options = parse_options_file(args.optionsfile)
        print(f"INFO: Loaded {len(options)} configuration options")

        # Find the source database
        print(f"INFO: Finding source database: {args.srcdb}")
        if args.sourcehost:
            print(f"INFO: Filtering by source host: {args.sourcehost}")
        db = find_database_by_name_or_id(args.srcdb, args.sourcehost)
        print(f"INFO: Found database: {db['dbUniqueName']} (ID: {db['id']})")
        print(f"INFO: Cluster: {db['cluster']['name']} (ID: {db['cluster']['id']})")

        # Get recovery timestamp
        if args.timestamp:
            print(f"INFO: Using specified timestamp: {args.timestamp}")
            try:
                dt = datetime.strptime(args.timestamp, '%Y-%m-%d %H:%M:%S')
                recovery_timestamp_ms = int(dt.timestamp() * 1000)
                print(f"INFO: Recovery timestamp: {recovery_timestamp_ms} ms")
            except ValueError as e:
                print(f"ERROR: Invalid timestamp format. Use 'YYYY-MM-DD HH:MM:SS': {e}",
                      file=sys.stderr)
                sys.exit(1)
        else:
            print("INFO: No timestamp specified, using latest PIT")
            recovery_timestamp_ms = get_latest_pit(db['id'])

        # Get target host ID
        print(f"INFO: Resolving target host: {args.targethost}")
        target_host_id = get_oracle_host_id(args.targethost, db['cluster']['id'])
        print(f"INFO: Target host ID: {target_host_id}")

        # Prepare operation configuration
        operation_type = "live mount" if args.livemount else "clone"
        config_summary = {
            "operation_type": operation_type,
            "source_db": db['dbUniqueName'],
            "source_db_id": db['id'],
            "target_host": args.targethost,
            "target_host_id": target_host_id,
            "operation_name": args.newname,
            "recovery_timestamp_ms": recovery_timestamp_ms,
            "num_channels": args.channels,
            "custom_pfile_path": args.pfile,
            "options_count": len(options)
        }

        if args.dryrun:
            print(f"\n=== DRY-RUN MODE ({operation_type.upper()}) ===")
            print(f"Would execute {operation_type} with the following configuration:")
            print(json.dumps(config_summary, indent=2))
            print(f"Options: {json.dumps(options, indent=2)}")
            print(f"=== END DRY-RUN ({operation_type.upper()}) ===")
            return

        # Execute the operation
        print(f"INFO: Starting {operation_type} operation for database '{args.newname}'")
        job_id = execute_operation(
            db['id'],
            target_host_id,
            recovery_timestamp_ms,
            args.newname,
            options,
            args.channels,
            args.pfile,
            args.livemount
        )
        print(f"INFO: {operation_type.capitalize()} job started with ID: {job_id}")

        # Monitor the job
        monitor_job_status(job_id, db['cluster']['id'])

    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)


if __name__ == "__main__":
    main()
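
# ---------------------------------------------------------------------------
# Illustrative sketch only (kept in a comment so it never executes): the rsc
# module imported above is not part of this script. Based solely on how it is
# used here, it must provide an RSCAuth() constructor, an RSCGraphQL(auth)
# wrapper, and a query(query, variables) method returning the parsed JSON
# response as a dict. One minimal implementation could look like the code
# below; the environment variable names (RSC_GRAPHQL_URL, RSC_API_TOKEN) and
# the use of a pre-issued bearer token are assumptions, not part of this
# script.
#
#   import os
#   import requests
#
#   class RSCAuth:
#       """Holds the RSC GraphQL endpoint and an access token (assumed to be
#       supplied via environment variables for this sketch)."""
#       def __init__(self):
#           self.url = os.environ["RSC_GRAPHQL_URL"]
#           self.token = os.environ["RSC_API_TOKEN"]
#
#   class RSCGraphQL:
#       """Thin wrapper that POSTs a GraphQL query and returns the JSON body."""
#       def __init__(self, auth):
#           self.auth = auth
#
#       def query(self, query, variables=None):
#           resp = requests.post(
#               self.auth.url,
#               headers={"Authorization": f"Bearer {self.auth.token}"},
#               json={"query": query, "variables": variables or {}},
#           )
#           resp.raise_for_status()
#           return resp.json()
# ---------------------------------------------------------------------------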