initial seeding

2025-11-14 17:34:50 +00:00
parent 8ccc545e79
commit 7c0dfd0e14
10 changed files with 1417 additions and 0 deletions

116
.gitignore vendored Normal file

@@ -0,0 +1,116 @@
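# RSC service account credentials (see rsc.json.example); keep out of version control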
rsc.json
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/

123
SHED_to_SCLONE.txt Normal file

@@ -0,0 +1,123 @@
###############################################################################
# #
# This is an example Advanced Recovery Configuration file for a customized #
# recovery of an Oracle database. #
# #
# Use this file by modifying the values for the applicable options. Once the #
# changes are complete, remove the preceding hash mark (#) to uncomment the #
# lines containing the changed options. Upload the modified file to the #
# Rubrik Mount or Clone menu. #
# #
# The modified values override the values from the source database when using #
# an Advanced Recovery Options file. #
# #
# Any custom directory paths which are specified in this file must exist on #
# the target before triggering the Live Mount or Clone operation. #
# #
# See the CDM user guide for more information about using these parameters. #
# #
# Using Custom Pfile: #
# Specify only these parameters in the configuration file when using a #
# custom pfile and an Advanced Recovery Configuration file: #
# - ORACLE_HOME #
# - SPFILE_LOCATION #
# - DB_CREATE_ONLINE_LOG_DEST_* (supported for Live Mount only) #
# Add other parameters to the custom pfile. #
# #
# Using Clone Database Name: #
# When a custom database name is specified for a Clone operation, Rubrik #
# recommends either of the following sets of parameters: #
# - DB_FILE_NAME_CONVERT, LOG_FILE_NAME_CONVERT, PARAMETER_VALUE_CONVERT #
# - CONTROL_FILES, DB_CREATE_FILE_DEST #
# #
# Using Live Mount or Clone with DB name change: #
# When a custom database name is specified for a Live Mount or Clone #
# operation to the source host or RAC, specify all of the following #
# parameters: #
# - CONTROL_FILES, AUDIT_FILE_DEST, DB_FILE_NAME_CONVERT #
# Additionally, specify at least one of the following parameters: #
# - DB_CREATE_FILE_DEST, DB_RECOVERY_FILE_DEST, #
# DB_CREATE_ONLINE_LOG_DEST_n, LOG_FILE_NAME_CONVERT #
# #
###############################################################################
###############################################################################
# Recovered database memory parameters:
###############################################################################
# SGA_MAX_SIZE=2G
# SGA_TARGET=2G
# PGA_AGGREGATE_TARGET=1G
# USE_LARGE_PAGES=false
###############################################################################
# Full path to the restored spfile after recovery:
###############################################################################
# SPFILE_LOCATION='/u01/app/oracle/product/dbhome/spfilerbk.ora'
###############################################################################
# Locations of the controlfiles (may be either ASM disk groups, ASM directories
# or filesystem paths):
###############################################################################
CONTROL_FILES='/u01/app/oracle/oradata/SCLONE/control01.ctl, /u01/app/oracle/fast_recovery_area/SCLONE/control02.ctl'
###############################################################################
# Default locations for Oracle-managed control files and online redo logs
# (Specify up to 5 locations):
###############################################################################
# DB_CREATE_ONLINE_LOG_DEST_1=+DG1
# DB_CREATE_ONLINE_LOG_DEST_2=+DG2
# DB_CREATE_ONLINE_LOG_DEST_3=+DG3
# DB_CREATE_ONLINE_LOG_DEST_4=/u01/log_dest_4
# DB_CREATE_ONLINE_LOG_DEST_5=/u01/log_dest_5
###############################################################################
# Locations where archived logs will be created (Specify up to 31 locations):
###############################################################################
# LOG_ARCHIVE_DEST_1='LOCATION=+DG1'
# LOG_ARCHIVE_DEST_2='LOCATION=+DG2'
# LOG_ARCHIVE_DEST_3='LOCATION=+DG3'
# LOG_ARCHIVE_DEST_4='LOCATION=+DG4'
# LOG_ARCHIVE_DEST_5='LOCATION=+DG5'
###############################################################################
# Configuration of the Fast Recovery Area (FRA)
# (If db_recovery_file_dest is specified, db_recovery_file_dest_size must also
# be specified):
###############################################################################
# DB_RECOVERY_FILE_DEST=+FRA
# DB_RECOVERY_FILE_DEST_SIZE=3G
###############################################################################
# Default location of Oracle-managed data files:
###############################################################################
DB_CREATE_FILE_DEST=/u01/app/oracle/oradata/SCLONE/
###############################################################################
# Location of the audit records:
###############################################################################
AUDIT_FILE_DEST='/u01/app/oracle/admin/SCLONE/adump'
###############################################################################
# ORACLE_HOME path on the target Oracle host or RAC:
###############################################################################
# ORACLE_HOME='/u02/app/oracle/product/12.2.0/db_1'
###############################################################################
# NOTE: These parameters are supported when Live Mounting or Cloning an
# Oracle database.
#
# Each of these parameters may be used to alter the file paths
# of each of the database files:
# - PARAMETER_VALUE_CONVERT (substitute values in all parameters)
# - DB_FILE_NAME_CONVERT (alters the file paths for data files)
# - LOG_FILE_NAME_CONVERT (alters the file paths for online redo log files)
################################################################################
# PARAMETER_VALUE_CONVERT='STESBC','RBKTEST'
DB_FILE_NAME_CONVERT='SHED','SCLONE'

488
clone_oracle_database.py Normal file

@@ -0,0 +1,488 @@
#!/usr/bin/env python3
import json
import sys
import os
import argparse
import time
from datetime import datetime
from rsc_auth import RSCAuth
from rsc_graphql import RSCGraphQL
def parse_options_file(options_file):
"""Parse the options file containing Oracle parameters"""
options = []
try:
with open(options_file, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
# Skip empty lines and comments
if not line or line.startswith('#'):
continue
# Parse key=value pairs
if '=' in line:
key, value = line.split('=', 1)
key = key.strip()
value = value.strip()
# Handle quoted values
if value.startswith("'") and value.endswith("'"):
# Keep the quotes for Oracle parameters
pass
else:
# Remove any stray single quotes
value = value.replace("'", "")
options.append({
"key": key,
"value": value
})
except FileNotFoundError:
print(f"ERROR: Options file '{options_file}' does not exist.", file=sys.stderr)
sys.exit(1)
except Exception as e:
print(f"ERROR: Failed to parse options file: {e}", file=sys.stderr)
sys.exit(1)
return options
def find_database_by_name_or_id(identifier, source_host=None):
"""Find database by name or ID and return its details"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
# Check if identifier looks like a UUID (contains hyphens)
if '-' in identifier:
# It's likely a database ID
query = """
query OracleDatabase($fid: UUID!) {
oracleDatabase(fid: $fid) {
dbUniqueName
id
cluster {
id
name
}
logicalPath {
fid
name
objectType
}
}
}
"""
variables = {"fid": identifier}
else:
# It's a database name
filter_conditions = [
{"texts": [identifier], "field": "NAME_EXACT_MATCH"},
{"texts": ["false"], "field": "IS_RELIC"},
{"texts": ["false"], "field": "IS_REPLICATED"}
]
if source_host:
# Add host filter if source host specified
filter_conditions.append({"texts": [source_host], "field": "HOST_NAME"})
query = """
query OracleDatabases($filter: [Filter!]) {
oracleDatabases(filter: $filter) {
nodes {
dbUniqueName
id
cluster {
id
name
}
logicalPath {
fid
name
objectType
}
}
}
}
"""
variables = {"filter": filter_conditions}
response = gql.query(query, variables)
if '-' in identifier:
# Direct ID lookup
db = response['data']['oracleDatabase']
if not db:
raise ValueError(f"Database with ID '{identifier}' not found")
return db
else:
# Name lookup
databases = response['data']['oracleDatabases']['nodes']
if not databases:
raise ValueError(f"No databases found with name '{identifier}'")
if source_host:
# Filter by source host if specified
filtered_dbs = [db for db in databases if any(path.get('name') == source_host for path in db.get('logicalPath', []))]
if not filtered_dbs:
raise ValueError(f"No databases found with name '{identifier}' on host '{source_host}'")
if len(filtered_dbs) > 1:
print(f"Multiple databases found with name '{identifier}' on host '{source_host}':", file=sys.stderr)
for db in filtered_dbs:
print(f" - {db['dbUniqueName']} (ID: {db['id']})", file=sys.stderr)
raise ValueError("Please specify the database ID instead")
return filtered_dbs[0]
else:
if len(databases) > 1:
print(f"Multiple databases found with name '{identifier}':", file=sys.stderr)
for db in databases:
host_name = db['logicalPath'][0]['name'] if db['logicalPath'] else 'Unknown'
print(f" - {db['dbUniqueName']} (ID: {db['id']}, Host: {host_name})", file=sys.stderr)
raise ValueError("Please specify the database ID instead")
return databases[0]
def get_latest_pit(db_id):
"""Get the latest Point in Time from recoverable ranges"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
query = """
query OracleDatabaseRecoverableRangesQuery($fid: String!) {
oracleRecoverableRanges(
input: {id: $fid, shouldIncludeDbSnapshotSummaries: false}
) {
data {
beginTime
endTime
__typename
}
__typename
}
oracleMissedRecoverableRanges(input: {id: $fid}) {
data {
beginTime
endTime
__typename
}
__typename
}
}
"""
variables = {"fid": db_id}
response = gql.query(query, variables)
# Get latest endTime from recoverable ranges
ranges = response['data']['oracleRecoverableRanges']['data']
if ranges:
latest_endtime = max(range_item['endTime'] for range_item in ranges)
print(f"INFO: Latest PIT (ISO8601): {latest_endtime}")
# Convert to datetime and then to milliseconds since epoch
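# Illustrative conversion: '2025-11-14T15:30:00Z' -> 1763134200000 ms since epoch.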
dt = datetime.fromisoformat(latest_endtime.replace('Z', '+00:00'))
unixtime_ms = int(dt.timestamp() * 1000)
print(f"INFO: Latest PIT unixtime (ms): {unixtime_ms}")
return unixtime_ms
else:
raise ValueError("No recoverable ranges found for database")
def get_oracle_host_id(host_name, cluster_id):
"""Get Oracle host ID by name and cluster"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
query = """
query OracleHosts($filter: [Filter!]) {
oracleTopLevelDescendants(filter: $filter) {
nodes {
name
id
}
}
}
"""
variables = {
"filter": [
{"texts": [host_name], "field": "NAME"},
{"texts": [cluster_id], "field": "CLUSTER_ID"}
]
}
response = gql.query(query, variables)
hosts = response['data']['oracleTopLevelDescendants']['nodes']
if not hosts:
raise ValueError(f"Host '{host_name}' not found in cluster")
if len(hosts) > 1:
print(f"WARN: Multiple hosts found for '{host_name}':", file=sys.stderr)
for host in hosts:
print(f" - {host['name']} (ID: {host['id']})", file=sys.stderr)
# Use the first one
print(f"WARN: Using first match: {hosts[0]['name']}", file=sys.stderr)
return hosts[0]['id']
def execute_operation(db_id, target_host_id, recovery_timestamp_ms, operation_name, options, num_channels=None, custom_pfile_path=None, is_live_mount=False):
"""Execute the database clone or live mount operation"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
# Build the config object
config = {
"targetOracleHostOrRacId": target_host_id,
"recoveryPoint": {
"timestampMs": recovery_timestamp_ms
},
"shouldAllowRenameToSource": True,
"shouldSkipDropDbInUndo": False
}
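# Live mount and clone (export) set different fields for the new DB name and
# the files-only flag; the rest of the config is shared.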
if is_live_mount:
config["shouldMountFilesOnly"] = False
config["lmDbName"] = operation_name
else:
config["shouldRestoreFilesOnly"] = False
config["cloneDbName"] = operation_name
if num_channels is not None:
config["numChannels"] = num_channels
if custom_pfile_path is not None:
config["customPfilePath"] = custom_pfile_path
variables = {
"input": {
"request": {
"id": db_id,
"config": config
},
"advancedRecoveryConfigMap": options
}
}
if is_live_mount:
query = """
mutation OracleDatabaseMountMutation($input: MountOracleDatabaseInput!) {
mountOracleDatabase(input: $input) {
id
links {
href
rel
__typename
}
__typename
}
}
"""
response = gql.query(query, variables)
return response['data']['mountOracleDatabase']['id']
else:
query = """
mutation OracleDatabaseExportMutation($input: ExportOracleDatabaseInput!) {
exportOracleDatabase(input: $input) {
id
links {
href
rel
__typename
}
__typename
}
}
"""
response = gql.query(query, variables)
return response['data']['exportOracleDatabase']['id']
def monitor_job_status(job_id, cluster_id):
"""Monitor the clone job status until completion"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
query = """
query OracleDatabaseAsyncRequestDetails($input: GetOracleAsyncRequestStatusInput!) {
oracleDatabaseAsyncRequestDetails(input: $input) {
id
nodeId
status
startTime
endTime
progress
error {
message
}
}
}
"""
variables = {
"input": {
"id": job_id,
"clusterUuid": cluster_id
}
}
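# Poll the async request every 15 seconds until it reaches a terminal state
# (SUCCEEDED, FAILED, or CANCELLED).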
while True:
response = gql.query(query, variables)
details = response['data']['oracleDatabaseAsyncRequestDetails']
status = details['status']
progress = details.get('progress', 0)
print(f"INFO: Job status: {status} ({progress}%)")
if status == "FAILED":
error_msg = details.get('error', {}).get('message', 'Unknown error')
print(f"ERROR: Database clone FAILED: {error_msg}", file=sys.stderr)
print(json.dumps(response, indent=2))
sys.exit(2)
elif status == "CANCELLED":
print("WARN: Database clone CANCELLED")
sys.exit(3)
elif status == "SUCCEEDED":
print("INFO: Database clone SUCCEEDED")
print(json.dumps(response, indent=2))
return
time.sleep(15)
def main():
parser = argparse.ArgumentParser(
description="Clone or live mount an Oracle database using Rubrik Security Cloud",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False, # Disable built-in -h/--help; --help is re-added explicitly below
epilog="""
Examples:
python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host SCLONE
python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host --livemount SCLONE
python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -s source-host SCLONE
python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -t "2025-11-14 15:30:00" SCLONE
python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host --dryrun SCLONE
python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -c 4 SCLONE
python clone_oracle_database.py -n NEWDB -o options.txt --targethost target-host -p /path/to/pfile SCLONE
Options file format (options.txt):
CONTROL_FILES='/u01/app/oracle/oradata/NEWDB/control01.ctl, /u01/app/oracle/fast_recovery_area/NEWDB/control02.ctl'
DB_FILE_NAME_CONVERT='SCLONE','NEWDB'
DB_CREATE_FILE_DEST=/u01/app/oracle/oradata/NEWDB/
AUDIT_FILE_DEST='/u01/app/oracle/admin/NEWDB/adump'
"""
)
parser.add_argument("-n", "--newname", required=True,
help="Database name/SID of the new cloned or live mounted database")
parser.add_argument("-o", "--optionsfile", required=True,
help="Path to the options file containing advanced cloning options")
parser.add_argument("--targethost", required=True,
help="Target host where the cloned database will be created")
parser.add_argument("-s", "--sourcehost",
help="Source host where the original database is located (optional)")
parser.add_argument("-t", "--timestamp",
help="Optional timestamp for the recovery point in format 'YYYY-MM-DD HH:MM:SS'")
parser.add_argument("-d", "--dryrun", action="store_true",
help="Dry-run mode - show mutation variables without executing the clone")
parser.add_argument("--livemount", action="store_true",
help="Create a live mount instead of a clone")
parser.add_argument("-c", "--channels", type=int,
help="Optional number of RMAN channels to configure for the clone")
parser.add_argument("-p", "--pfile",
help="Optional custom pfile path for the clone")
parser.add_argument("--help", action="help", default=argparse.SUPPRESS,
help="Show this help message and exit")
parser.add_argument("srcdb",
help="Source database name or RSC database ID")
args = parser.parse_args()
# Validate arguments
if args.channels is not None and args.channels <= 0:
print("ERROR: -c requires a positive integer value", file=sys.stderr)
sys.exit(1)
if args.pfile and not args.pfile.startswith('/'):
print("ERROR: -p requires an absolute path (starting with /)", file=sys.stderr)
sys.exit(1)
try:
# Parse options file
print(f"INFO: Parsing options file: {args.optionsfile}")
options = parse_options_file(args.optionsfile)
print(f"INFO: Loaded {len(options)} configuration options")
# Find the source database
print(f"INFO: Finding source database: {args.srcdb}")
if args.sourcehost:
print(f"INFO: Filtering by source host: {args.sourcehost}")
db = find_database_by_name_or_id(args.srcdb, args.sourcehost)
print(f"INFO: Found database: {db['dbUniqueName']} (ID: {db['id']})")
print(f"INFO: Cluster: {db['cluster']['name']} (ID: {db['cluster']['id']})")
# Get recovery timestamp
if args.timestamp:
print(f"INFO: Using specified timestamp: {args.timestamp}")
try:
dt = datetime.strptime(args.timestamp, '%Y-%m-%d %H:%M:%S')
recovery_timestamp_ms = int(dt.timestamp() * 1000)
print(f"INFO: Recovery timestamp: {recovery_timestamp_ms} ms")
except ValueError as e:
print(f"ERROR: Invalid timestamp format. Use 'YYYY-MM-DD HH:MM:SS': {e}", file=sys.stderr)
sys.exit(1)
else:
print("INFO: No timestamp specified, using latest PIT")
recovery_timestamp_ms = get_latest_pit(db['id'])
# Get target host ID
print(f"INFO: Resolving target host: {args.targethost}")
target_host_id = get_oracle_host_id(args.targethost, db['cluster']['id'])
print(f"INFO: Target host ID: {target_host_id}")
# Prepare operation configuration
operation_type = "live mount" if args.livemount else "clone"
config_summary = {
"operation_type": operation_type,
"source_db": db['dbUniqueName'],
"source_db_id": db['id'],
"target_host": args.targethost,
"target_host_id": target_host_id,
"operation_name": args.newname,
"recovery_timestamp_ms": recovery_timestamp_ms,
"num_channels": args.channels,
"custom_pfile_path": args.pfile,
"options_count": len(options)
}
if args.dryrun:
print(f"\n=== DRY-RUN MODE ({operation_type.upper()}) ===")
print(f"Would execute {operation_type} with the following configuration:")
print(json.dumps(config_summary, indent=2))
print(f"Options: {json.dumps(options, indent=2)}")
print(f"=== END DRY-RUN ({operation_type.upper()}) ===")
return
# Execute the operation
print(f"INFO: Starting {operation_type} operation for database '{args.newname}'")
job_id = execute_operation(
db['id'],
target_host_id,
recovery_timestamp_ms,
args.newname,
options,
args.channels,
args.pfile,
args.livemount
)
print(f"INFO: Clone job started with ID: {job_id}")
# Monitor the job
monitor_job_status(job_id, db['cluster']['id'])
except Exception as e:
print(f"ERROR: {e}", file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()

73
introspect_schema.py Normal file

@@ -0,0 +1,73 @@
#!/usr/bin/env python3
import json
import sys
from rsc_auth import RSCAuth
from rsc_graphql import RSCGraphQL
def explore_oracle_types():
"""Explore Oracle-related types in the GraphQL schema"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
# Get information about the OracleDatabase type
print("Exploring OracleDatabase type...")
try:
result = gql.get_type_info("OracleDatabase")
oracle_db_type = result['data']['__type']
if oracle_db_type:
print(f"Type: {oracle_db_type['name']}")
print(f"Kind: {oracle_db_type['kind']}")
print(f"Description: {oracle_db_type.get('description', 'No description')}")
print("\nFields:")
for field in oracle_db_type.get('fields', []):
print(f" - {field['name']}: {field['description'] or 'No description'}")
field_type = field['type']
type_name = field_type.get('name') or (field_type.get('ofType', {}).get('name') if field_type.get('ofType') else 'Unknown')
print(f" Type: {type_name}")
else:
print("OracleDatabase type not found")
except Exception as e:
print(f"Error exploring OracleDatabase type: {e}")
def list_available_types():
"""List all available types in the schema"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
print("Getting available types...")
try:
result = gql.introspect_schema()
types = result['data']['__schema']['types']
oracle_types = [t for t in types if t.get('name') and 'Oracle' in t['name']]
print(f"\nFound {len(oracle_types)} Oracle-related types:")
for t in sorted(oracle_types, key=lambda x: x['name']):
print(f" - {t['name']} ({t['kind']})")
# Also look for LogicalPath related types
logical_types = [t for t in types if t.get('name') and 'logical' in t['name'].lower()]
if logical_types:
print(f"\nFound {len(logical_types)} Logical-related types:")
for t in logical_types:
print(f" - {t['name']} ({t['kind']})")
except Exception as e:
print(f"Error listing types: {e}")
if __name__ == "__main__":
if len(sys.argv) > 1:
type_name = sys.argv[1]
auth = RSCAuth()
gql = RSCGraphQL(auth)
try:
result = gql.get_type_info(type_name)
print(json.dumps(result, indent=2))
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
else:
list_available_types()

332
list_db_snapshots.py Normal file

@@ -0,0 +1,332 @@
#!/usr/bin/env python3
import json
import sys
import os
from datetime import datetime
from rsc_auth import RSCAuth
from rsc_graphql import RSCGraphQL
from tabulate import tabulate
def find_database_by_name_or_id(identifier):
"""Find database by name or ID and return its details"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
# Check if identifier looks like a UUID (contains hyphens)
if '-' in identifier:
# It's likely a database ID
query = """
query OracleDatabase($fid: UUID!) {
oracleDatabase(fid: $fid) {
dbUniqueName
id
cluster {
id
name
}
logicalPath {
fid
name
objectType
}
}
}
"""
variables = {"fid": identifier}
else:
# It's a database name
query = """
query OracleDatabases($filter: [Filter!]) {
oracleDatabases(filter: $filter) {
nodes {
dbUniqueName
id
cluster {
id
name
}
logicalPath {
fid
name
objectType
}
}
}
}
"""
variables = {
"filter": [
{"texts": [identifier], "field": "NAME_EXACT_MATCH"},
{"texts": ["false"], "field": "IS_RELIC"},
{"texts": ["false"], "field": "IS_REPLICATED"}
]
}
response = gql.query(query, variables)
if '-' in identifier:
# Direct ID lookup
db = response['data']['oracleDatabase']
if not db:
raise ValueError(f"Database with ID '{identifier}' not found")
return db
else:
# Name lookup
databases = response['data']['oracleDatabases']['nodes']
if not databases:
raise ValueError(f"No databases found with name '{identifier}'")
if len(databases) > 1:
print(f"Multiple databases found with name '{identifier}':")
for db in databases:
host_name = db['logicalPath'][0]['name'] if db['logicalPath'] else 'Unknown'
print(f" - {db['dbUniqueName']} (ID: {db['id']}, Host: {host_name})")
raise ValueError("Please specify the database ID instead")
return databases[0]
def get_recoverable_ranges(db_id):
"""Get recoverable ranges for a database"""
auth = RSCAuth()
gql = RSCGraphQL(auth)
query = """
query OracleDatabaseRecoverableRangesQuery($fid: String!) {
oracleRecoverableRanges(
input: {id: $fid, shouldIncludeDbSnapshotSummaries: true}
) {
data {
beginTime
endTime
status
dbSnapshotSummaries {
databaseName
isValid
hostOrRacName
baseSnapshotSummary {
id
date
isOnDemandSnapshot
replicationLocationIds
archivalLocationIds
}
}
__typename
}
__typename
}
oracleMissedRecoverableRanges(input: {id: $fid}) {
data {
beginTime
endTime
__typename
}
__typename
}
}
"""
variables = {"fid": db_id}
response = gql.query(query, variables)
return response['data']
def get_cluster_name(cluster_id):
"""Get cluster name by ID"""
if not cluster_id:
return "Unknown"
# Cache cluster names to avoid repeated queries
if not hasattr(get_cluster_name, '_cache'):
get_cluster_name._cache = {}
if cluster_id in get_cluster_name._cache:
return get_cluster_name._cache[cluster_id]
try:
auth = RSCAuth()
gql = RSCGraphQL(auth)
query = """
query ListClusters {
allClusterConnection {
nodes {
id
name
}
}
}
"""
response = gql.query(query)
clusters = response['data']['allClusterConnection']['nodes']
# Find the cluster with matching ID
for cluster in clusters:
if cluster['id'] == cluster_id:
name = cluster['name']
get_cluster_name._cache[cluster_id] = name
return name
# If not found, return fallback
return f"Cluster-{cluster_id[:8]}"
except Exception:
return f"Cluster-{cluster_id[:8]}"
def get_location_name(location_type, location_ids):
"""Get human-readable location names"""
if not location_ids:
return []
names = []
for loc_id in location_ids:
if location_type == "replication":
name = get_cluster_name(loc_id)
names.append(f"Replicated: {name}")
elif location_type == "archival":
# For archival, we might not have cluster info, so use a generic name
names.append(f"Archive: {loc_id[:8]}...")
else:
names.append(f"{location_type}: {loc_id[:8]}...")
return names
def format_timestamp(timestamp):
"""Format ISO timestamp to readable format"""
try:
dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
return dt.strftime('%Y-%m-%d %H:%M:%S UTC')
except Exception:
return timestamp
def list_snapshots_and_ranges(identifier):
"""List snapshots and recovery ranges for a database"""
try:
# Find the database
db = find_database_by_name_or_id(identifier)
print(f"Database: {db['dbUniqueName']}")
print(f"ID: {db['id']}")
cluster_name = db['cluster']['name'] if db['cluster'] else 'Unknown'
print(f"Cluster: {cluster_name}")
host_name = db['logicalPath'][0]['name'] if db['logicalPath'] else 'Unknown'
print(f"Host: {host_name}")
print("-" * 80)
# Get recoverable ranges
ranges_data = get_recoverable_ranges(db['id'])
# Display recoverable ranges
recoverable_ranges = ranges_data.get('oracleRecoverableRanges', {}).get('data', [])
if recoverable_ranges:
print(f"\nRecoverable Ranges ({len(recoverable_ranges)} found):")
table_data = []
for range_item in recoverable_ranges:
table_data.append([
format_timestamp(range_item['beginTime']),
format_timestamp(range_item['endTime']),
range_item.get('status', 'Unknown')
])
headers = ['Begin Time', 'End Time', 'Status']
print(tabulate(table_data, headers=headers, tablefmt='grid'))
# Display snapshots by location
print(f"\nSnapshot Distribution:")
location_snapshots = {}
for range_item in recoverable_ranges:
snapshots = range_item.get('dbSnapshotSummaries', [])
for snapshot in snapshots:
base = snapshot.get('baseSnapshotSummary', {})
# Group by replication locations
repl_locations = base.get('replicationLocationIds', [])
arch_locations = base.get('archivalLocationIds', [])
# Add to local (assuming no replication/archival IDs means local)
if not repl_locations and not arch_locations:
loc_name = "Local (Source)"
if loc_name not in location_snapshots:
location_snapshots[loc_name] = []
location_snapshots[loc_name].append({
'id': base.get('id'),
'date': base.get('date'),
'isOnDemand': base.get('isOnDemandSnapshot', False),
'host': snapshot.get('hostOrRacName', 'Unknown')
})
else:
# Handle replication locations
for repl_id in repl_locations:
cluster_name = get_cluster_name(repl_id)
loc_name = f"Replicated: {cluster_name}"
if loc_name not in location_snapshots:
location_snapshots[loc_name] = []
location_snapshots[loc_name].append({
'id': base.get('id'),
'date': base.get('date'),
'isOnDemand': base.get('isOnDemandSnapshot', False),
'host': snapshot.get('hostOrRacName', 'Unknown')
})
# Handle archival locations
for arch_id in arch_locations:
loc_name = f"Archived: {arch_id[:8]}..."
if loc_name not in location_snapshots:
location_snapshots[loc_name] = []
location_snapshots[loc_name].append({
'id': base.get('id'),
'date': base.get('date'),
'isOnDemand': base.get('isOnDemandSnapshot', False),
'host': snapshot.get('hostOrRacName', 'Unknown')
})
# Display snapshots by location
for location, snapshots in location_snapshots.items():
print(f"\n📍 {location} ({len(snapshots)} snapshots):")
table_data = []
for snap in sorted(snapshots, key=lambda x: x['date']):
table_data.append([
snap['id'][:8] + '...', # Truncate ID for readability
format_timestamp(snap['date']),
'On-Demand' if snap['isOnDemand'] else 'Policy',
snap['host']
])
headers = ['Snapshot ID', 'Date', 'Type', 'Host']
print(tabulate(table_data, headers=headers, tablefmt='grid'))
else:
print("\nNo recoverable ranges found.")
# Display missed recoverable ranges
missed_ranges = ranges_data.get('oracleMissedRecoverableRanges', {}).get('data', [])
if missed_ranges:
print(f"\nMissed Recoverable Ranges ({len(missed_ranges)} found):")
table_data = []
for range_item in missed_ranges:
table_data.append([
format_timestamp(range_item['beginTime']),
format_timestamp(range_item['endTime'])
])
headers = ['Begin Time', 'End Time']
print(tabulate(table_data, headers=headers, tablefmt='grid'))
else:
print("\nNo missed recoverable ranges found.")
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
def main():
if len(sys.argv) != 2:
print("Usage: python list_db_snapshots.py <database_name_or_id>")
print("Examples:")
print(" python list_db_snapshots.py SCLONE")
print(" python list_db_snapshots.py 2cb7e201-9da0-53f2-8c69-8fc21f82e0d2")
sys.exit(1)
identifier = sys.argv[1]
list_snapshots_and_ranges(identifier)
if __name__ == "__main__":
main()

87
list_oracle_databases.py Normal file

@@ -0,0 +1,87 @@
#!/usr/bin/env python3
import json
import sys
import os
from rsc_auth import RSCAuth
from rsc_graphql import RSCGraphQL
from tabulate import tabulate
def list_oracle_databases():
# Initialize auth
auth = RSCAuth()
# Initialize GraphQL client
gql = RSCGraphQL(auth)
# GraphQL query to list Oracle databases
query = """
query OracleDatabases($filter: [Filter!]) {
oracleDatabases(filter: $filter) {
nodes {
dbUniqueName
id
cluster {
id
name
}
logicalPath {
fid
name
objectType
}
}
}
}
"""
# Variables: exclude relics and replicated databases
variables = {
"filter": [
{
"texts": ["false"],
"field": "IS_RELIC"
},
{
"texts": ["false"],
"field": "IS_REPLICATED"
}
]
}
# Execute query
response = gql.query(query, variables)
# Parse and display results
databases = response['data']['oracleDatabases']['nodes']
if not databases:
print("No Oracle databases found.")
return
# Prepare data for tabulation
table_data = []
for db in databases:
cluster_name = db['cluster']['name'] if db['cluster'] else 'Unknown'
host_name = 'Unknown'
if db['logicalPath'] and len(db['logicalPath']) > 0:
# For standalone DBs, logicalPath[0] is typically the host
host_name = db['logicalPath'][0]['name']
table_data.append([
db['dbUniqueName'],
db['id'],
cluster_name,
host_name
])
# Print tabulated output
headers = ['Database Name', 'ID', 'Cluster', 'Host']
print(tabulate(table_data, headers=headers, tablefmt='grid'))
if __name__ == "__main__":
try:
list_oracle_databases()
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)

3
requirements.txt Normal file

@@ -0,0 +1,3 @@
requests
tabulate
jq

6
rsc.json.example Normal file

@@ -0,0 +1,6 @@
{
"client_id": "client|your-client-id-here",
"client_secret": "your-client-secret-here",
"name": "Your RSC Service Account Name",
"access_token_uri": "https://your-organization.my.rubrik.com/api/client_token"
}

81
rsc_auth.py Normal file

@@ -0,0 +1,81 @@
import json
import os
import time
import requests
class RSCAuth:
def __init__(self, config_file='rsc.json'):
self.config_file = config_file
self.load_config()
self.token = None
self.token_expiration = None
def load_config(self):
if not os.path.exists(self.config_file):
raise FileNotFoundError(f"Configuration file {self.config_file} not found")
with open(self.config_file, 'r') as f:
config = json.load(f)
self.client_id = config.get('client_id')
self.client_secret = config.get('client_secret')
self.access_token_uri = config.get('access_token_uri')
if not all([self.client_id, self.client_secret, self.access_token_uri]):
raise ValueError("Missing required fields in config: client_id, client_secret, access_token_uri")
# Derive host from access_token_uri
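# e.g. 'https://your-organization.my.rubrik.com/api/client_token' -> 'your-organization.my.rubrik.com'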
self.host = self.access_token_uri.replace('https://', '').replace('/api/client_token', '')
def get_token(self):
# Check if we have a cached token
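# Cache file format: a single line of '<expiration_epoch_seconds> <access_token>'.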
cache_file = self._get_cache_file()
if os.path.exists(cache_file):
with open(cache_file, 'r') as f:
expiration, token = f.read().strip().split(' ', 1)
expiration = int(expiration)
if time.time() < expiration - 1800: # Refresh 30 min before expiry
self.token = token
self.token_expiration = expiration
return token
# Get new token
return self._fetch_token()
def _fetch_token(self):
payload = {
'client_id': self.client_id,
'client_secret': self.client_secret
}
headers = {'accept': 'application/json', 'Content-Type': 'application/json'}
response = requests.post(self.access_token_uri, json=payload, headers=headers)
response.raise_for_status()
data = response.json()
self.token = data['access_token']
expires_in = data['expires_in']
self.token_expiration = int(time.time()) + expires_in
# Cache the token
cache_file = self._get_cache_file()
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
with open(cache_file, 'w') as f:
f.write(f"{self.token_expiration} {self.token}")
os.chmod(cache_file, 0o600)
return self.token
def _get_cache_file(self):
# Use the id part after 'client|'
if '|' in self.client_id:
id_part = self.client_id.split('|')[1]
else:
id_part = self.client_id
return os.path.expanduser(f"~/.rbkRscsession.{id_part}")
def get_headers(self):
return {
'Authorization': f'Bearer {self.get_token()}',
'Content-Type': 'application/json'
}

108
rsc_graphql.py Normal file

@@ -0,0 +1,108 @@
import json
import requests
class RSCGraphQL:
def __init__(self, auth):
self.auth = auth
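# All queries go to /api/graphql on the same host that issued the access token.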
self.endpoint = f"https://{self.auth.host}/api/graphql"
def query(self, query, variables=None):
payload = {'query': query}
if variables:
payload['variables'] = variables
headers = self.auth.get_headers()
response = requests.post(self.endpoint, json=payload, headers=headers)
response.raise_for_status()
data = response.json()
# Check for GraphQL errors
if 'errors' in data:
raise Exception(f"GraphQL errors: {data['errors']}")
return data
def introspect_schema(self):
"""Introspect the GraphQL schema to get type information"""
introspection_query = """
query IntrospectionQuery {
__schema {
types {
name
kind
description
fields(includeDeprecated: true) {
name
description
type {
name
kind
ofType {
name
kind
}
}
args {
name
description
type {
name
kind
ofType {
name
kind
}
}
}
}
}
}
}
"""
return self.query(introspection_query)
def get_type_info(self, type_name):
"""Get detailed information about a specific GraphQL type"""
query = """
query GetTypeInfo($typeName: String!) {
__type(name: $typeName) {
name
kind
description
fields(includeDeprecated: true) {
name
description
type {
name
kind
ofType {
name
kind
ofType {
name
kind
}
}
}
args {
name
description
type {
name
kind
ofType {
name
kind
ofType {
name
kind
}
}
}
}
}
}
}
"""
return self.query(query, {"typeName": type_name})