Bela specifics

This commit is contained in:
2025-11-25 14:38:15 +00:00
parent 2179254c5f
commit 60fa02c181
2 changed files with 493 additions and 197 deletions

View File

@@ -83,112 +83,6 @@ def find_database_by_name_or_id(identifier):
raise ValueError("Please specify the database ID instead")
return databases[0]
def get_recoverable_ranges(db_id):
    """Fetch recoverable and missed recoverable ranges for an Oracle database.

    Args:
        db_id: RSC fid of the Oracle database.

    Returns:
        The ``data`` section of the GraphQL response, holding both
        ``oracleRecoverableRanges`` (with per-snapshot summaries) and
        ``oracleMissedRecoverableRanges``.
    """
    # One authenticated client per call; GraphQL query text is static.
    client = RSCGraphQL(RSCAuth())
    ranges_query = """
    query OracleDatabaseRecoverableRangesQuery($fid: String!) {
      oracleRecoverableRanges(
        input: {id: $fid, shouldIncludeDbSnapshotSummaries: true}
      ) {
        data {
          beginTime
          endTime
          status
          dbSnapshotSummaries {
            databaseName
            isValid
            hostOrRacName
            baseSnapshotSummary {
              id
              date
              isOnDemandSnapshot
              replicationLocationIds
              archivalLocationIds
            }
          }
          __typename
        }
        __typename
      }
      oracleMissedRecoverableRanges(input: {id: $fid}) {
        data {
          beginTime
          endTime
          __typename
        }
        __typename
      }
    }
    """
    reply = client.query(ranges_query, {"fid": db_id})
    return reply['data']
def get_cluster_name(cluster_id):
    """Resolve a cluster ID to its display name.

    Results are memoized on the function object (``_cache``) so repeated
    lookups do not re-query the cluster list.

    Args:
        cluster_id: Cluster UUID string; falsy values yield "Unknown".

    Returns:
        The cluster's name, or a stable fallback of the form
        ``Cluster-<first 8 chars of the id>`` when the lookup fails or the
        ID is not present in the cluster list.
    """
    if not cluster_id:
        return "Unknown"
    # Cache cluster names to avoid repeated queries
    if not hasattr(get_cluster_name, '_cache'):
        get_cluster_name._cache = {}
    if cluster_id in get_cluster_name._cache:
        return get_cluster_name._cache[cluster_id]
    try:
        auth = RSCAuth()
        gql = RSCGraphQL(auth)
        query = """
        query ListClusters {
          allClusterConnection {
            nodes {
              id
              name
            }
          }
        }
        """
        response = gql.query(query)
        clusters = response['data']['allClusterConnection']['nodes']
        # Find the cluster with matching ID
        for cluster in clusters:
            if cluster['id'] == cluster_id:
                name = cluster['name']
                get_cluster_name._cache[cluster_id] = name
                return name
        # If not found, return fallback
        return f"Cluster-{cluster_id[:8]}"
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; name resolution is best-effort, so any
        # ordinary failure still degrades to the truncated-ID fallback.
        return f"Cluster-{cluster_id[:8]}"
def get_location_name(location_type, location_ids):
    """Build human-readable labels for a list of location IDs.

    Args:
        location_type: "replication", "archival", or any other category
            string used verbatim as the label prefix.
        location_ids: Iterable of location ID strings; falsy yields [].

    Returns:
        List of label strings, one per ID, in input order.
    """
    if not location_ids:
        return []

    def _label(loc_id):
        if location_type == "replication":
            return f"Replicated: {get_cluster_name(loc_id)}"
        if location_type == "archival":
            # Archival targets carry no cluster info; show a truncated ID.
            return f"Archive: {loc_id[:8]}..."
        return f"{location_type}: {loc_id[:8]}..."

    return [_label(loc_id) for loc_id in location_ids]
def format_timestamp(timestamp):
"""Format ISO timestamp to readable format"""
try:
@@ -197,8 +91,8 @@ def format_timestamp(timestamp):
except:
return timestamp
def list_snapshots_and_ranges(identifier):
"""List snapshots and recovery ranges for a database"""
def list_snapshots(identifier):
"""List all snapshots for a database using snapshotsListConnection"""
try:
# Find the database
db = find_database_by_name_or_id(identifier)
@@ -211,106 +105,140 @@ def list_snapshots_and_ranges(identifier):
print(f"Host: {host_name}")
print("-" * 80)
# Get recoverable ranges
ranges_data = get_recoverable_ranges(db['id'])
# Initialize auth and GraphQL client
auth = RSCAuth()
gql = RSCGraphQL(auth)
# Display recoverable ranges
recoverable_ranges = ranges_data.get('oracleRecoverableRanges', {}).get('data', [])
if recoverable_ranges:
print(f"\nRecoverable Ranges ({len(recoverable_ranges)} found):")
# Query to get all snapshots using snapshotOfASnappableConnection
query = """
query SnapshotsListSingleQuery($snappableId: String!, $first: Int, $sortBy: SnapshotQuerySortByField, $sortOrder: SortOrder, $includeOnlySourceSnapshots: Boolean) {
snapshotsListConnection: snapshotOfASnappableConnection(
workloadId: $snappableId
first: $first
sortBy: $sortBy
sortOrder: $sortOrder
includeOnlySourceSnapshots: $includeOnlySourceSnapshots
) {
edges {
node {
__typename
id
date
isOnDemandSnapshot
... on CdmSnapshot {
cluster {
id
name
}
slaDomain {
id
name
}
snapshotRetentionInfo {
localInfo {
name
isExpirationDateCalculated
expirationTime
}
replicationInfos {
locationId
name
}
archivalInfos {
locationId
name
}
}
}
... on PolarisSnapshot {
archivalLocationName
isReplica
isArchivalCopy
slaDomain {
name
... on ClusterSlaDomain {
cluster {
id
name
}
}
}
}
}
}
}
}
"""
# Filter for the specific database
variables = {
"snappableId": db['id'],
"first": 1000, # Get up to 1000 snapshots
"sortBy": "CREATION_TIME",
"sortOrder": "DESC",
"includeOnlySourceSnapshots": False, # Include replicated and archived snapshots
}
response = gql.query(query, variables)
snapshots = response['data']['snapshotsListConnection']['edges']
if not snapshots:
print("\nNo snapshots found.")
return
# Group snapshots by actual location status
source_snapshots = []
replica_only_snapshots = []
for edge in snapshots:
snap = edge['node']
# Check if snapshot exists locally
if snap['snapshotRetentionInfo']['localInfo'] is not None:
source_snapshots.append(snap)
else:
replica_only_snapshots.append(snap)
# Display summary
print(f"\nSnapshot Summary:")
print(f" Source cluster (jp-edge-proxmox): {len(source_snapshots)} snapshots")
print(f" Replica only (jp-edge-dr): {len(replica_only_snapshots)} snapshots")
print(f" Total: {len(snapshots)} snapshots")
print("-" * 80)
# Display source snapshots first
if source_snapshots:
print(f"\nSOURCE CLUSTER SNAPSHOTS ({len(source_snapshots)})")
print("=" * 80)
table_data = []
for range_item in recoverable_ranges:
for snap in source_snapshots:
table_data.append([
format_timestamp(range_item['beginTime']),
format_timestamp(range_item['endTime']),
range_item.get('status', 'Unknown')
snap['id'][:8] + '...',
format_timestamp(snap['date']),
'On-Demand' if snap['isOnDemandSnapshot'] else 'Policy',
snap['slaDomain']['name'] if snap['slaDomain'] else 'None',
'Local + Replica' if snap['snapshotRetentionInfo']['replicationInfos'] else '✅ Local only'
])
headers = ['Begin Time', 'End Time', 'Status']
headers = ['Snapshot ID', 'Date', 'Type', 'SLA Domain', 'Location Status']
print(tabulate(table_data, headers=headers, tablefmt='grid'))
# Display snapshots by location
print(f"\nSnapshot Distribution:")
location_snapshots = {}
for range_item in recoverable_ranges:
snapshots = range_item.get('dbSnapshotSummaries', [])
for snapshot in snapshots:
base = snapshot.get('baseSnapshotSummary', {})
# Group by replication locations
repl_locations = base.get('replicationLocationIds', [])
arch_locations = base.get('archivalLocationIds', [])
# Add to local (assuming no replication/archival IDs means local)
if not repl_locations and not arch_locations:
loc_name = "Local (Source)"
if loc_name not in location_snapshots:
location_snapshots[loc_name] = []
location_snapshots[loc_name].append({
'id': base.get('id'),
'date': base.get('date'),
'isOnDemand': base.get('isOnDemandSnapshot', False),
'host': snapshot.get('hostOrRacName', 'Unknown')
})
else:
# Handle replication locations
for repl_id in repl_locations:
cluster_name = get_cluster_name(repl_id)
loc_name = f"Replicated: {cluster_name}"
if loc_name not in location_snapshots:
location_snapshots[loc_name] = []
location_snapshots[loc_name].append({
'id': base.get('id'),
'date': base.get('date'),
'isOnDemand': base.get('isOnDemandSnapshot', False),
'host': snapshot.get('hostOrRacName', 'Unknown')
})
# Handle archival locations
for arch_id in arch_locations:
loc_name = f"Archived: {arch_id[:8]}..."
if loc_name not in location_snapshots:
location_snapshots[loc_name] = []
location_snapshots[loc_name].append({
'id': base.get('id'),
'date': base.get('date'),
'isOnDemand': base.get('isOnDemandSnapshot', False),
'host': snapshot.get('hostOrRacName', 'Unknown')
})
# Display snapshots by location
for location, snapshots in location_snapshots.items():
print(f"\n📍 {location} ({len(snapshots)} snapshots):")
table_data = []
for snap in sorted(snapshots, key=lambda x: x['date']):
table_data.append([
snap['id'][:8] + '...', # Truncate ID for readability
format_timestamp(snap['date']),
'On-Demand' if snap['isOnDemand'] else 'Policy',
snap['host']
])
headers = ['Snapshot ID', 'Date', 'Type', 'Host']
print(tabulate(table_data, headers=headers, tablefmt='grid'))
else:
print("\nNo recoverable ranges found.")
# Display missed recoverable ranges
missed_ranges = ranges_data.get('oracleMissedRecoverableRanges', {}).get('data', [])
if missed_ranges:
print(f"\nMissed Recoverable Ranges ({len(missed_ranges)} found):")
# Display replica-only snapshots
if replica_only_snapshots:
print(f"\nREPLICA-ONLY SNAPSHOTS ({len(replica_only_snapshots)})")
print("=" * 80)
table_data = []
for range_item in missed_ranges:
for snap in replica_only_snapshots:
table_data.append([
format_timestamp(range_item['beginTime']),
format_timestamp(range_item['endTime'])
snap['id'][:8] + '...',
format_timestamp(snap['date']),
'On-Demand' if snap['isOnDemandSnapshot'] else 'Policy',
snap['slaDomain']['name'] if snap['slaDomain'] else 'None',
'Expired from source'
])
headers = ['Begin Time', 'End Time']
headers = ['Snapshot ID', 'Date', 'Type', 'SLA Domain', 'Location Status']
print(tabulate(table_data, headers=headers, tablefmt='grid'))
else:
print("\nNo missed recoverable ranges found.")
except Exception as e:
print(f"Error: {e}", file=sys.stderr)
@@ -325,7 +253,7 @@ def main():
sys.exit(1)
identifier = sys.argv[1]
list_snapshots_and_ranges(identifier)
list_snapshots(identifier)
if __name__ == "__main__":
main()