Bela specifics
This commit is contained in:
@@ -83,112 +83,6 @@ def find_database_by_name_or_id(identifier):
|
||||
raise ValueError("Please specify the database ID instead")
|
||||
return databases[0]
|
||||
|
||||
def get_recoverable_ranges(db_id):
    """Get recoverable ranges for a database.

    Issues one GraphQL query that returns both ``oracleRecoverableRanges``
    (with per-range snapshot summaries, including each base snapshot's
    replication/archival location IDs) and ``oracleMissedRecoverableRanges``.

    Args:
        db_id: RSC database FID (string).

    Returns:
        The ``data`` portion of the GraphQL response, containing the
        ``oracleRecoverableRanges`` and ``oracleMissedRecoverableRanges``
        keys.
    """
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    query = """
    query OracleDatabaseRecoverableRangesQuery($fid: String!) {
        oracleRecoverableRanges(
            input: {id: $fid, shouldIncludeDbSnapshotSummaries: true}
        ) {
            data {
                beginTime
                endTime
                status
                dbSnapshotSummaries {
                    databaseName
                    isValid
                    hostOrRacName
                    baseSnapshotSummary {
                        id
                        date
                        isOnDemandSnapshot
                        replicationLocationIds
                        archivalLocationIds
                    }
                }
                __typename
            }
            __typename
        }
        oracleMissedRecoverableRanges(input: {id: $fid}) {
            data {
                beginTime
                endTime
                __typename
            }
            __typename
        }
    }
    """

    variables = {"fid": db_id}
    response = gql.query(query, variables)

    return response['data']
|
||||
|
||||
def get_cluster_name(cluster_id):
    """Resolve a cluster UUID to its display name via RSC.

    Results are memoized on the function object so repeated lookups do
    not re-query the API. On any lookup failure, or when the ID is not
    found, a stable fallback of the form ``Cluster-<first 8 chars>`` is
    returned instead of raising.

    Args:
        cluster_id: Cluster UUID string; falsy values yield "Unknown".

    Returns:
        Human-readable cluster name, or a fallback label.
    """
    if not cluster_id:
        return "Unknown"

    # Cache cluster names to avoid repeated queries
    if not hasattr(get_cluster_name, '_cache'):
        get_cluster_name._cache = {}

    if cluster_id in get_cluster_name._cache:
        return get_cluster_name._cache[cluster_id]

    try:
        auth = RSCAuth()
        gql = RSCGraphQL(auth)

        query = """
        query ListClusters {
            allClusterConnection {
                nodes {
                    id
                    name
                }
            }
        }
        """

        response = gql.query(query)
        clusters = response['data']['allClusterConnection']['nodes']

        # Find the cluster with matching ID
        for cluster in clusters:
            if cluster['id'] == cluster_id:
                name = cluster['name']
                get_cluster_name._cache[cluster_id] = name
                return name

        # If not found, return fallback
        return f"Cluster-{cluster_id[:8]}"
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed; API errors still degrade to the fallback.
        return f"Cluster-{cluster_id[:8]}"
|
||||
|
||||
def get_location_name(location_type, location_ids):
    """Build human-readable labels for a set of location IDs.

    "replication" IDs are resolved to cluster names; "archival" and any
    other location types fall back to a truncated-ID label.

    Args:
        location_type: One of "replication", "archival", or any other tag.
        location_ids: Iterable of location ID strings (may be empty/None).

    Returns:
        List of label strings, one per location ID.
    """
    if not location_ids:
        return []

    def _label(loc_id):
        # Produce one display label for a single location ID.
        if location_type == "replication":
            return f"Replicated: {get_cluster_name(loc_id)}"
        if location_type == "archival":
            # No cluster info is available for archival targets, so a
            # generic truncated-ID name is used instead.
            return f"Archive: {loc_id[:8]}..."
        return f"{location_type}: {loc_id[:8]}..."

    return [_label(loc_id) for loc_id in location_ids]
|
||||
|
||||
def format_timestamp(timestamp):
|
||||
"""Format ISO timestamp to readable format"""
|
||||
try:
|
||||
@@ -197,8 +91,8 @@ def format_timestamp(timestamp):
|
||||
except:
|
||||
return timestamp
|
||||
|
||||
def list_snapshots_and_ranges(identifier):
|
||||
"""List snapshots and recovery ranges for a database"""
|
||||
def list_snapshots(identifier):
|
||||
"""List all snapshots for a database using snapshotsListConnection"""
|
||||
try:
|
||||
# Find the database
|
||||
db = find_database_by_name_or_id(identifier)
|
||||
@@ -211,106 +105,140 @@ def list_snapshots_and_ranges(identifier):
|
||||
print(f"Host: {host_name}")
|
||||
print("-" * 80)
|
||||
|
||||
# Get recoverable ranges
|
||||
ranges_data = get_recoverable_ranges(db['id'])
|
||||
# Initialize auth and GraphQL client
|
||||
auth = RSCAuth()
|
||||
gql = RSCGraphQL(auth)
|
||||
|
||||
# Display recoverable ranges
|
||||
recoverable_ranges = ranges_data.get('oracleRecoverableRanges', {}).get('data', [])
|
||||
if recoverable_ranges:
|
||||
print(f"\nRecoverable Ranges ({len(recoverable_ranges)} found):")
|
||||
# Query to get all snapshots using snapshotOfASnappableConnection
|
||||
query = """
|
||||
query SnapshotsListSingleQuery($snappableId: String!, $first: Int, $sortBy: SnapshotQuerySortByField, $sortOrder: SortOrder, $includeOnlySourceSnapshots: Boolean) {
|
||||
snapshotsListConnection: snapshotOfASnappableConnection(
|
||||
workloadId: $snappableId
|
||||
first: $first
|
||||
sortBy: $sortBy
|
||||
sortOrder: $sortOrder
|
||||
includeOnlySourceSnapshots: $includeOnlySourceSnapshots
|
||||
) {
|
||||
edges {
|
||||
node {
|
||||
__typename
|
||||
id
|
||||
date
|
||||
isOnDemandSnapshot
|
||||
... on CdmSnapshot {
|
||||
cluster {
|
||||
id
|
||||
name
|
||||
}
|
||||
slaDomain {
|
||||
id
|
||||
name
|
||||
}
|
||||
snapshotRetentionInfo {
|
||||
localInfo {
|
||||
name
|
||||
isExpirationDateCalculated
|
||||
expirationTime
|
||||
}
|
||||
replicationInfos {
|
||||
locationId
|
||||
name
|
||||
}
|
||||
archivalInfos {
|
||||
locationId
|
||||
name
|
||||
}
|
||||
}
|
||||
}
|
||||
... on PolarisSnapshot {
|
||||
archivalLocationName
|
||||
isReplica
|
||||
isArchivalCopy
|
||||
slaDomain {
|
||||
name
|
||||
... on ClusterSlaDomain {
|
||||
cluster {
|
||||
id
|
||||
name
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
# Filter for the specific database
|
||||
variables = {
|
||||
"snappableId": db['id'],
|
||||
"first": 1000, # Get up to 1000 snapshots
|
||||
"sortBy": "CREATION_TIME",
|
||||
"sortOrder": "DESC",
|
||||
"includeOnlySourceSnapshots": False, # Include replicated and archived snapshots
|
||||
}
|
||||
|
||||
response = gql.query(query, variables)
|
||||
snapshots = response['data']['snapshotsListConnection']['edges']
|
||||
|
||||
if not snapshots:
|
||||
print("\nNo snapshots found.")
|
||||
return
|
||||
|
||||
# Group snapshots by actual location status
|
||||
source_snapshots = []
|
||||
replica_only_snapshots = []
|
||||
|
||||
for edge in snapshots:
|
||||
snap = edge['node']
|
||||
|
||||
# Check if snapshot exists locally
|
||||
if snap['snapshotRetentionInfo']['localInfo'] is not None:
|
||||
source_snapshots.append(snap)
|
||||
else:
|
||||
replica_only_snapshots.append(snap)
|
||||
|
||||
# Display summary
|
||||
print(f"\nSnapshot Summary:")
|
||||
print(f" Source cluster (jp-edge-proxmox): {len(source_snapshots)} snapshots")
|
||||
print(f" Replica only (jp-edge-dr): {len(replica_only_snapshots)} snapshots")
|
||||
print(f" Total: {len(snapshots)} snapshots")
|
||||
print("-" * 80)
|
||||
|
||||
# Display source snapshots first
|
||||
if source_snapshots:
|
||||
print(f"\nSOURCE CLUSTER SNAPSHOTS ({len(source_snapshots)})")
|
||||
print("=" * 80)
|
||||
table_data = []
|
||||
for range_item in recoverable_ranges:
|
||||
for snap in source_snapshots:
|
||||
table_data.append([
|
||||
format_timestamp(range_item['beginTime']),
|
||||
format_timestamp(range_item['endTime']),
|
||||
range_item.get('status', 'Unknown')
|
||||
snap['id'][:8] + '...',
|
||||
format_timestamp(snap['date']),
|
||||
'On-Demand' if snap['isOnDemandSnapshot'] else 'Policy',
|
||||
snap['slaDomain']['name'] if snap['slaDomain'] else 'None',
|
||||
'Local + Replica' if snap['snapshotRetentionInfo']['replicationInfos'] else '✅ Local only'
|
||||
])
|
||||
|
||||
headers = ['Begin Time', 'End Time', 'Status']
|
||||
headers = ['Snapshot ID', 'Date', 'Type', 'SLA Domain', 'Location Status']
|
||||
print(tabulate(table_data, headers=headers, tablefmt='grid'))
|
||||
|
||||
# Display snapshots by location
|
||||
print(f"\nSnapshot Distribution:")
|
||||
location_snapshots = {}
|
||||
|
||||
for range_item in recoverable_ranges:
|
||||
snapshots = range_item.get('dbSnapshotSummaries', [])
|
||||
for snapshot in snapshots:
|
||||
base = snapshot.get('baseSnapshotSummary', {})
|
||||
|
||||
# Group by replication locations
|
||||
repl_locations = base.get('replicationLocationIds', [])
|
||||
arch_locations = base.get('archivalLocationIds', [])
|
||||
|
||||
# Add to local (assuming no replication/archival IDs means local)
|
||||
if not repl_locations and not arch_locations:
|
||||
loc_name = "Local (Source)"
|
||||
if loc_name not in location_snapshots:
|
||||
location_snapshots[loc_name] = []
|
||||
location_snapshots[loc_name].append({
|
||||
'id': base.get('id'),
|
||||
'date': base.get('date'),
|
||||
'isOnDemand': base.get('isOnDemandSnapshot', False),
|
||||
'host': snapshot.get('hostOrRacName', 'Unknown')
|
||||
})
|
||||
else:
|
||||
# Handle replication locations
|
||||
for repl_id in repl_locations:
|
||||
cluster_name = get_cluster_name(repl_id)
|
||||
loc_name = f"Replicated: {cluster_name}"
|
||||
if loc_name not in location_snapshots:
|
||||
location_snapshots[loc_name] = []
|
||||
location_snapshots[loc_name].append({
|
||||
'id': base.get('id'),
|
||||
'date': base.get('date'),
|
||||
'isOnDemand': base.get('isOnDemandSnapshot', False),
|
||||
'host': snapshot.get('hostOrRacName', 'Unknown')
|
||||
})
|
||||
|
||||
# Handle archival locations
|
||||
for arch_id in arch_locations:
|
||||
loc_name = f"Archived: {arch_id[:8]}..."
|
||||
if loc_name not in location_snapshots:
|
||||
location_snapshots[loc_name] = []
|
||||
location_snapshots[loc_name].append({
|
||||
'id': base.get('id'),
|
||||
'date': base.get('date'),
|
||||
'isOnDemand': base.get('isOnDemandSnapshot', False),
|
||||
'host': snapshot.get('hostOrRacName', 'Unknown')
|
||||
})
|
||||
|
||||
# Display snapshots by location
|
||||
for location, snapshots in location_snapshots.items():
|
||||
print(f"\n📍 {location} ({len(snapshots)} snapshots):")
|
||||
table_data = []
|
||||
for snap in sorted(snapshots, key=lambda x: x['date']):
|
||||
table_data.append([
|
||||
snap['id'][:8] + '...', # Truncate ID for readability
|
||||
format_timestamp(snap['date']),
|
||||
'On-Demand' if snap['isOnDemand'] else 'Policy',
|
||||
snap['host']
|
||||
])
|
||||
|
||||
headers = ['Snapshot ID', 'Date', 'Type', 'Host']
|
||||
print(tabulate(table_data, headers=headers, tablefmt='grid'))
|
||||
else:
|
||||
print("\nNo recoverable ranges found.")
|
||||
|
||||
# Display missed recoverable ranges
|
||||
missed_ranges = ranges_data.get('oracleMissedRecoverableRanges', {}).get('data', [])
|
||||
if missed_ranges:
|
||||
print(f"\nMissed Recoverable Ranges ({len(missed_ranges)} found):")
|
||||
# Display replica-only snapshots
|
||||
if replica_only_snapshots:
|
||||
print(f"\nREPLICA-ONLY SNAPSHOTS ({len(replica_only_snapshots)})")
|
||||
print("=" * 80)
|
||||
table_data = []
|
||||
for range_item in missed_ranges:
|
||||
for snap in replica_only_snapshots:
|
||||
table_data.append([
|
||||
format_timestamp(range_item['beginTime']),
|
||||
format_timestamp(range_item['endTime'])
|
||||
snap['id'][:8] + '...',
|
||||
format_timestamp(snap['date']),
|
||||
'On-Demand' if snap['isOnDemandSnapshot'] else 'Policy',
|
||||
snap['slaDomain']['name'] if snap['slaDomain'] else 'None',
|
||||
'Expired from source'
|
||||
])
|
||||
|
||||
headers = ['Begin Time', 'End Time']
|
||||
headers = ['Snapshot ID', 'Date', 'Type', 'SLA Domain', 'Location Status']
|
||||
print(tabulate(table_data, headers=headers, tablefmt='grid'))
|
||||
else:
|
||||
print("\nNo missed recoverable ranges found.")
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error: {e}", file=sys.stderr)
|
||||
@@ -325,7 +253,7 @@ def main():
|
||||
sys.exit(1)
|
||||
|
||||
identifier = sys.argv[1]
|
||||
list_snapshots_and_ranges(identifier)
|
||||
list_snapshots(identifier)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
368
mount_oracle_filesonly.py
Normal file
368
mount_oracle_filesonly.py
Normal file
@@ -0,0 +1,368 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import json
|
||||
import sys
|
||||
import os
|
||||
import argparse
|
||||
import time
|
||||
from datetime import datetime
|
||||
from rsc import RSCAuth, RSCGraphQL
|
||||
|
||||
def find_database_by_name_or_id(identifier):
    """Find database by name or ID and return its details.

    The lookup strategy is keyed off the identifier's shape: anything
    containing a hyphen is treated as an RSC FID (UUID) and looked up
    directly; otherwise it is treated as a database name and matched
    exactly, excluding relic and replicated copies.

    Args:
        identifier: Oracle database name or RSC database FID.

    Returns:
        dict with ``dbUniqueName``, ``id``, ``cluster`` and
        ``logicalPath`` for the matched database.

    Raises:
        ValueError: if no database matches, or if a name matches more
            than one database (caller must retry with the FID).
    """
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    # Check if identifier looks like a UUID (contains hyphens)
    if '-' in identifier:
        # It's likely a database ID
        query = """
        query OracleDatabase($fid: UUID!) {
            oracleDatabase(fid: $fid) {
                dbUniqueName
                id
                cluster {
                    id
                    name
                }
                logicalPath {
                    fid
                    name
                    objectType
                }
            }
        }
        """
        variables = {"fid": identifier}
    else:
        # It's a database name
        query = """
        query OracleDatabases($filter: [Filter!]) {
            oracleDatabases(filter: $filter) {
                nodes {
                    dbUniqueName
                    id
                    cluster {
                        id
                        name
                    }
                    logicalPath {
                        fid
                        name
                        objectType
                    }
                }
            }
        }
        """
        # Exact-name match only; relics and replicated copies are
        # excluded so a unique source database is returned when possible.
        variables = {
            "filter": [
                {"texts": [identifier], "field": "NAME_EXACT_MATCH"},
                {"texts": ["false"], "field": "IS_RELIC"},
                {"texts": ["false"], "field": "IS_REPLICATED"}
            ]
        }

    response = gql.query(query, variables)

    if '-' in identifier:
        # Direct ID lookup
        db = response['data']['oracleDatabase']
        if not db:
            raise ValueError(f"Database with ID '{identifier}' not found")
        return db
    else:
        # Name lookup
        databases = response['data']['oracleDatabases']['nodes']
        if not databases:
            raise ValueError(f"No databases found with name '{identifier}'")
        if len(databases) > 1:
            # Ambiguous name: list the candidates so the user can rerun
            # with an unambiguous FID.
            print(f"Multiple databases found with name '{identifier}':")
            for db in databases:
                host_name = db['logicalPath'][0]['name'] if db['logicalPath'] else 'Unknown'
                print(f"  - {db['dbUniqueName']} (ID: {db['id']}, Host: {host_name})")
            raise ValueError("Please specify the database ID instead")
        return databases[0]
|
||||
|
||||
def get_oracle_host_id(host_name, cluster_id):
    """Get Oracle host ID by name and cluster.

    Args:
        host_name: Host (or RAC) name to search for.
        cluster_id: Cluster UUID used to scope the search.

    Returns:
        The FID of the first matching host. When several hosts match,
        warnings are printed to stderr and the first match is used.

    Raises:
        ValueError: if no host matches within the cluster.
    """
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    query = """
    query OracleHosts($filter: [Filter!]) {
        oracleTopLevelDescendants(filter: $filter) {
            nodes {
                name
                id
            }
        }
    }
    """

    variables = {
        "filter": [
            {"texts": [host_name], "field": "NAME"},
            {"texts": [cluster_id], "field": "CLUSTER_ID"}
        ]
    }

    response = gql.query(query, variables)
    hosts = response['data']['oracleTopLevelDescendants']['nodes']

    if not hosts:
        raise ValueError(f"Host '{host_name}' not found in cluster")

    if len(hosts) > 1:
        print(f"WARN: Multiple hosts found for '{host_name}':", file=sys.stderr)
        for host in hosts:
            print(f"  - {host['name']} (ID: {host['id']})", file=sys.stderr)
        # Use the first one
        print(f"WARN: Using first match: {hosts[0]['name']}", file=sys.stderr)

    return hosts[0]['id']
|
||||
|
||||
def get_latest_pit(db_id):
    """Get the latest Point in Time from recoverable ranges.

    Queries the database's recoverable ranges (without snapshot
    summaries) and takes the maximum ``endTime`` across all ranges.

    Args:
        db_id: RSC FID of the database.

    Returns:
        Latest recoverable point-in-time in milliseconds since epoch.

    Raises:
        ValueError: if the database has no recoverable ranges.
    """
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    query = """
    query OracleDatabaseRecoverableRangesQuery($fid: String!) {
        oracleRecoverableRanges(
            input: {id: $fid, shouldIncludeDbSnapshotSummaries: false}
        ) {
            data {
                beginTime
                endTime
                __typename
            }
            __typename
        }
        oracleMissedRecoverableRanges(input: {id: $fid}) {
            data {
                beginTime
                endTime
                __typename
            }
            __typename
        }
    }
    """

    variables = {"fid": db_id}
    response = gql.query(query, variables)

    # Get latest endTime from recoverable ranges
    ranges = response['data']['oracleRecoverableRanges']['data']
    if not ranges:
        raise ValueError("No recoverable ranges found for database")

    latest_endtime = max(range_item['endTime'] for range_item in ranges)
    print(f"INFO: Latest PIT (ISO8601): {latest_endtime}")

    # Convert to datetime and then to milliseconds since epoch.
    # 'Z' is swapped for '+00:00' because datetime.fromisoformat does
    # not accept the 'Z' suffix before Python 3.11.
    dt = datetime.fromisoformat(latest_endtime.replace('Z', '+00:00'))
    unixtime_ms = int(dt.timestamp() * 1000)
    print(f"INFO: Latest PIT unixtime (ms): {unixtime_ms}")

    return unixtime_ms

# NOTE: a stray, unreachable duplicate of get_oracle_host_id's body
# (referencing `host_name`/`cluster_id`, which are not in scope here)
# previously trailed this function; it has been removed.
|
||||
|
||||
def mount_files_only(db_id, target_host_id, recovery_timestamp_ms, target_mount_path):
    """Execute files-only mount operation.

    Starts a live mount of the database's files (``shouldMountFilesOnly``
    set) on the target host at the requested recovery point.

    Args:
        db_id: RSC FID of the source database.
        target_host_id: FID of the Oracle host (or RAC) to mount on.
        recovery_timestamp_ms: Recovery point as epoch milliseconds.
        target_mount_path: Filesystem path on the target host.

    Returns:
        The async job ID of the mount request, for status polling.
    """
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    variables = {
        "input": {
            "request": {
                "config": {
                    "targetOracleHostOrRacId": target_host_id,
                    # Files-only mount, per this script's purpose.
                    "shouldMountFilesOnly": True,
                    "recoveryPoint": {
                        "timestampMs": recovery_timestamp_ms,
                        # Recovery is by timestamp, not by SCN.
                        "scn": None
                    },
                    "targetMountPath": target_mount_path,
                    "shouldAllowRenameToSource": True,
                    "shouldSkipDropDbInUndo": False
                },
                "id": db_id
            },
            "advancedRecoveryConfigMap": []
        }
    }

    query = """
    mutation OracleDatabaseMountMutation($input: MountOracleDatabaseInput!) {
        mountOracleDatabase(input: $input) {
            id
            links {
                href
                rel
                __typename
            }
            __typename
        }
    }
    """

    response = gql.query(query, variables)
    return response['data']['mountOracleDatabase']['id']
|
||||
|
||||
def monitor_job_status(job_id, cluster_id):
    """Monitor the mount job status until completion.

    Polls every 15 seconds and prints progress. Exits the process on
    failure (exit code 2) or cancellation (exit code 3); returns
    normally on success.

    Args:
        job_id: Async request ID returned by the mount mutation.
        cluster_id: UUID of the cluster executing the job.
    """
    auth = RSCAuth()
    gql = RSCGraphQL(auth)

    query = """
    query OracleDatabaseAsyncRequestDetails($input: GetOracleAsyncRequestStatusInput!) {
        oracleDatabaseAsyncRequestDetails(input: $input) {
            id
            nodeId
            status
            startTime
            endTime
            progress
            error {
                message
            }
        }
    }
    """

    variables = {
        "input": {
            "id": job_id,
            "clusterUuid": cluster_id
        }
    }

    while True:
        response = gql.query(query, variables)
        details = response['data']['oracleDatabaseAsyncRequestDetails']

        status = details['status']
        # `progress` may be present but null early in the job's life;
        # `or 0` covers that, which a .get() default alone would not.
        progress = details.get('progress') or 0

        print(f"INFO: Job status: {status} ({progress}%)")

        if status == "FAILED":
            # `error` can be present-but-null in the GraphQL response;
            # `details.get('error', {})` would then return None and the
            # chained .get('message') would raise AttributeError.
            error_msg = (details.get('error') or {}).get('message', 'Unknown error')
            print(f"ERROR: Files-only mount FAILED: {error_msg}", file=sys.stderr)
            print(json.dumps(response, indent=2))
            sys.exit(2)
        elif status == "CANCELLED":
            print("WARN: Files-only mount CANCELLED")
            sys.exit(3)
        elif status == "SUCCEEDED":
            print("INFO: Files-only mount SUCCEEDED")
            print(json.dumps(response, indent=2))
            return

        time.sleep(15)
|
||||
|
||||
def main():
    """CLI entry point.

    Resolves the source database, picks a recovery point (user-supplied
    timestamp or the latest PIT), starts a files-only mount on the
    target host, and monitors the job to completion. Exits 1 on
    argument/lookup errors; the monitor's own exit codes (2 = failed,
    3 = cancelled) propagate unchanged.
    """
    parser = argparse.ArgumentParser(
        description="Mount Oracle database files-only using Rubrik Security Cloud",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
python mount_oracle_filesonly.py --targethost target-host --mountpath /tmp/mount SHED
python mount_oracle_filesonly.py --targethost target-host --mountpath /tmp/mount --timestamp "2025-11-25 12:00:00" SHED
"""
    )

    parser.add_argument("--targethost", required=True,
                        help="Target host where the files will be mounted")
    parser.add_argument("--mountpath", required=True,
                        help="Target mount path for the files")
    parser.add_argument("--timestamp",
                        help="Optional timestamp for the recovery point in format 'YYYY-MM-DD HH:MM:SS'")
    parser.add_argument("srcdb",
                        help="Source database name or RSC database ID")

    args = parser.parse_args()

    try:
        # Find the source database
        print(f"INFO: Finding source database: {args.srcdb}")
        db = find_database_by_name_or_id(args.srcdb)
        print(f"INFO: Found database: {db['dbUniqueName']} (ID: {db['id']})")
        print(f"INFO: Cluster: {db['cluster']['name']} (ID: {db['cluster']['id']})")

        # Get recovery timestamp
        if args.timestamp:
            print(f"INFO: Using specified timestamp: {args.timestamp}")
            try:
                # NOTE(review): strptime yields a naive datetime, so
                # .timestamp() interprets it in the machine's local
                # timezone, while the latest-PIT path works in UTC —
                # confirm which zone user-supplied timestamps are in.
                dt = datetime.strptime(args.timestamp, '%Y-%m-%d %H:%M:%S')
                recovery_timestamp_ms = int(dt.timestamp() * 1000)
                print(f"INFO: Recovery timestamp: {recovery_timestamp_ms} ms")
            except ValueError as e:
                print(f"ERROR: Invalid timestamp format. Use 'YYYY-MM-DD HH:MM:SS': {e}", file=sys.stderr)
                sys.exit(1)
        else:
            print("INFO: No timestamp specified, using latest PIT")
            recovery_timestamp_ms = get_latest_pit(db['id'])

        # Get target host ID
        print(f"INFO: Resolving target host: {args.targethost}")
        target_host_id = get_oracle_host_id(args.targethost, db['cluster']['id'])
        print(f"INFO: Target host ID: {target_host_id}")

        # Execute the files-only mount
        print(f"INFO: Starting files-only mount to path '{args.mountpath}'")
        job_id = mount_files_only(
            db['id'],
            target_host_id,
            recovery_timestamp_ms,
            args.mountpath
        )

        print(f"INFO: Mount job started with ID: {job_id}")

        # Monitor the job (blocks until a terminal state; may sys.exit)
        monitor_job_status(job_id, db['cluster']['id'])

    except Exception as e:
        print(f"ERROR: {e}", file=sys.stderr)
        sys.exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
Reference in New Issue
Block a user