first commit

This commit is contained in:
2025-09-04 08:54:46 +01:00
commit b5d849d029
7 changed files with 1292 additions and 0 deletions

123
SHED_to_SCLONE.txt Normal file
View File

@@ -0,0 +1,123 @@
###############################################################################
# #
# This is an example Advanced Recovery Configuration file for a customized #
# recovery of an Oracle database. #
# #
# Use this file by modifying the values for the applicable options. Once the #
# changes are complete, remove the preceding hash mark (#) to uncomment the #
# lines containing the changed options. Upload the modified file to the #
# Rubrik Mount or Clone menu. #
# #
# The modified values override the values from the source database when using #
# an Advanced Recovery Options file. #
# #
# Any custom directory paths which are specified in this file must exist on #
# the target before triggering the Live Mount or Clone operation. #
# #
# See the CDM user guide for more information about using these parameters. #
# #
# Using Custom Pfile: #
# Specify only these parameters in the configuration file when using a #
# custom pfile and an Advanced Recovery Configuration file: #
# - ORACLE_HOME #
# - SPFILE_LOCATION #
# - DB_CREATE_ONLINE_LOG_DEST_* (supported for Live Mount only) #
# Add other parameters to the custom pfile. #
# #
# Using Clone Database Name: #
# When a custom database name is specified for a Clone operation, Rubrik #
# recommends either of the following sets of parameters: #
# - DB_FILE_NAME_CONVERT, LOG_FILE_NAME_CONVERT, PARAMETER_VALUE_CONVERT #
# - CONTROL_FILES, DB_CREATE_FILE_DEST #
# #
# Using Live Mount or Clone with DB name change: #
# When a custom database name is specified for a Live Mount or Clone #
# operation to the source host or RAC, specify all of the following #
# parameters: #
# - CONTROL_FILES, AUDIT_FILE_DEST, DB_FILE_NAME_CONVERT #
# Additionally, specify at least one of the following parameters: #
# - DB_CREATE_FILE_DEST, DB_RECOVERY_FILE_DEST, #
# DB_CREATE_ONLINE_LOG_DEST_n, LOG_FILE_NAME_CONVERT #
# #
###############################################################################
###############################################################################
# Recovered database memory parameters:
###############################################################################
# SGA_MAX_SIZE=2G
# SGA_TARGET=2G
# PGA_AGGREGATE_TARGET=1G
# USE_LARGE_PAGES=false
###############################################################################
# Full path to the restored spfile after recovery:
###############################################################################
# SPFILE_LOCATION='/u01/app/oracle/product/dbhome/spfilerbk.ora'
###############################################################################
# Locations of the controlfiles (may be either ASM disk groups, ASM directories
# or filesystem paths):
###############################################################################
CONTROL_FILES='/u01/app/oracle/oradata/SCLONE/control01.ctl, /u01/app/oracle/fast_recovery_area/SCLONE/control02.ctl'
###############################################################################
# Default locations for Oracle-managed control files and online redo logs
# (Specify up to 5 locations):
###############################################################################
# DB_CREATE_ONLINE_LOG_DEST_1=+DG1
# DB_CREATE_ONLINE_LOG_DEST_2=+DG2
# DB_CREATE_ONLINE_LOG_DEST_3=+DG3
# DB_CREATE_ONLINE_LOG_DEST_4=/u01/log_dest_4
# DB_CREATE_ONLINE_LOG_DEST_5=/u01/log_dest_5
###############################################################################
# Locations where archived logs will be created (Specify up to 31 locations):
###############################################################################
# LOG_ARCHIVE_DEST_1='LOCATION=+DG1'
# LOG_ARCHIVE_DEST_2='LOCATION=+DG2'
# LOG_ARCHIVE_DEST_3='LOCATION=+DG3'
# LOG_ARCHIVE_DEST_4='LOCATION=+DG4'
# LOG_ARCHIVE_DEST_5='LOCATION=+DG5'
###############################################################################
# Configuration of the Fast Recovery Area (FRA)
# (If db_recovery_file_dest is specified, db_recovery_file_dest_size must also
# be specified):
###############################################################################
# DB_RECOVERY_FILE_DEST=+FRA
# DB_RECOVERY_FILE_DEST_SIZE=3G
###############################################################################
# Default location of Oracle-managed data files:
###############################################################################
DB_CREATE_FILE_DEST=/u01/app/oracle/oradata/SCLONE/
###############################################################################
# Location of the audit records:
###############################################################################
AUDIT_FILE_DEST='/u01/app/oracle/admin/SCLONE/adump'
###############################################################################
# ORACLE_HOME path on the target Oracle host or RAC:
###############################################################################
# ORACLE_HOME='/u02/app/oracle/product/12.2.0/db_1'
###############################################################################
# NOTE: These parameters are supported when Live Mounting or Cloning an
# Oracle database.
#
# Each of these parameters may be used to alter the file paths
# of each of the database files:
# - PARAMETER_VALUE_CONVERT (substitute values in all parameters)
# - DB_FILE_NAME_CONVERT (alters the file paths for data files)
# - LOG_FILE_NAME_CONVERT (alters the file paths for online redo log files)
################################################################################
# PARAMETER_VALUE_CONVERT='STESBC','RBKTEST'
DB_FILE_NAME_CONVERT='SHED','SCLONE'

247
oracle_funcs.sh Executable file
View File

@@ -0,0 +1,247 @@
#!/bin/bash
#
#--------------------------------------------------------------------------------------------------------
# Oracle shell script support functions
# v0.2 - James Pattinson - August 2021
# v0.3 - U701053 - 30.03.2022 - if database not found, try to search with the ENDPOINT dataguard
# v0.4 - James Pattinson 25/01/23 - Adding support for Service Accounts
#--------------------------------------------------------------------------------------------------------
# Resolve the directory this script lives in so rbk_api.conf can be sourced
# regardless of the caller's working directory.
MYDIR="$(dirname "$(realpath "$0")")"
source $MYDIR/rbk_api.conf
# -- Mobi config ---------
#RBK_ENV=$OUTI/shell/rubrik/conf/rbk_env
# -- End Mobi config ----
# -- Mobi config -----------
#if [ -f "${RBK_ENV}" ] ; then
# source ${RBK_ENV}
#else
# echo "The ${RBK_ENV} file is not found ...."
# exit_with_error
#fi
# -- End Mobi config -------
# When set to 1, async jobs are started but not monitored (see check_status).
nowait=0
# macOS ships BSD date; use GNU gdate (coreutils) there so 'date -d' style
# arguments behave consistently across platforms.
if [[ "$OSTYPE" == "darwin"* ]]; then
DATE=gdate
else
DATE=date
fi
# Abort the script: discard this PID's temp API response file and exit 1.
exit_with_error () {
  rm -f "/tmp/rbkresponse.$$"
  echo "Aborting Script!"
  exit 1
}
# Terminate after a job reports CANCELED: drop the temp response file, exit 1.
exit_with_cancel () {
  rm -f "/tmp/rbkresponse.$$"
  echo "Canceling Script!"
  exit 1
}
# Terminate after a job reports UNDOING: drop the temp response file, exit 1.
exit_with_undoing () {
  rm -f "/tmp/rbkresponse.$$"
  echo "undoing Script!"
  exit 1
}
# Abort the script unless $http_response is a 2xx HTTP status code.
# BUGFIX: the old unquoted '[ ${http_response:0:1} != "2" ]' became a
# [-syntax error when $http_response was empty/unset, and that error was
# silently treated as "no HTTP error". [[ ]] with quoting makes an empty
# status take the fatal path instead.
check_http_error () {
# All good responses start with a 2
if [[ "${http_response:0:1}" != "2" ]]; then
echo "FATAL: HTTP error from API call: $http_response. The server responded with:"
cat /tmp/rbkresponse.$$ ; echo ; exit_with_error
fi
}
# Given RBK_SID return $db_id of matching database
# Also sets: name, sla_id, dg_type, dg_id, RBK_HOST, num_instances, grp_name.
# Skips databases that are already Live Mounted (their IDs are collected
# into /tmp/mountedDBs.$$ first). Falls back to a Data Guard group search
# when the plain name lookup returns nothing.
find_database () {
# First get IDs of all the mounted DBs for this SID
ENDPOINT="https://$RUBRIK_IP/api/internal/oracle/db/mount"
rest_api_get
cat /tmp/rbkresponse.$$ | jq -r --arg SID "$RBK_SID" '.data[] | select(.mountedDatabaseId!=null and .mountedDatabaseName==$SID) | .mountedDatabaseId' > /tmp/mountedDBs.$$
# Now get a list of Oracle DBs
ENDPOINT="https://$RUBRIK_IP/api/v1/oracle/db?name=$RBK_SID"
rest_api_get
# If no database is found, try with dataguard
if [ `cat /tmp/rbkresponse.$$ | grep "id\":" | wc -l` -eq 0 ]; then
echo " DB not found, try with dataguard..."
ENDPOINT="https://$RUBRIK_IP/api/v1/oracle/db?is_data_guard_group=true&is_relic=false&name=$RBK_SID"
rest_api_get
fi
#echo "================================="
#cat /tmp/rbkresponse.$$ | jq -r '.data[]' | tee /tmp/titi
#echo "================================="
#echo "cat /tmp/rbkresponse.$$ | jq -r '.data[]'"
# If no host is specified then just look for the DB with the right SID
if [ -z $RBK_HOST ]; then
# get list of DB IDs in scope (sid matches and not a relic)
myDBs=$(cat /tmp/rbkresponse.$$ | jq -r --arg SID "$RBK_SID" '.data[] | select(.name==$SID and .isRelic==false) | .id' ; cat /tmp/rbkresponse.$$ | jq -r --arg SID "$RBK_SID" '.data[] | select(.name==$SID and .isRelic==false) | .dataGuardGroupId' | sort | uniq)
# Pick the first candidate that is NOT already mounted; the CDM id is the
# 4th colon-separated field (e.g. OracleDatabase:::<uuid>).
for db in $myDBs; do
id=$(echo $db | cut -d: -f 4)
if grep -q $id /tmp/mountedDBs.$$; then
continue
else
break
fi
done
# Get the details for the specific DB ID
read name db_id sla_id dg_type dg_id RBK_HOST num_instances grp_name < <(echo $(cat /tmp/rbkresponse.$$ | jq -r --arg ID "OracleDatabase:::$id" '.data[] | select(.id==$ID) | .name, .id, .effectiveSlaDomainId, .dataGuardType, .dataGuardGroupId, .instances[0].hostName, .numInstances, .dataGuardGroupName'))
# Host was specified
else
read name db_id sla_id dg_type dg_id RBK_HOST num_instances grp_name < <(echo $(cat /tmp/rbkresponse.$$ | jq -r --arg SID "$RBK_SID" --arg HOST "$RBK_HOST" '.data[] | select(.sid==$SID and .infraPath[0].name==$HOST and .isRelic==false) | .name, .id, .effectiveSlaDomainId, .dataGuardType, .dataGuardGroupId, .instances[0].hostName, .numInstances, .dataGuardGroupName'))
fi
# For DG members, operate on the group object instead of the member DB.
if [ "$dg_type" == "DataGuardMember" ]; then
db_id=$dg_id
# Now find SLA of the DG GROUP not just the DB (which will be unprotected)
ENDPOINT="https://$RUBRIK_IP/api/v1/oracle/db/$db_id"
rest_api_get
sla_id=$(cat /tmp/rbkresponse.$$ | jq -r '.effectiveSlaDomainId')
fi
if [ -z "$db_id" ]; then
echo FATAL: No DB found with SID $RBK_SID on host $RBK_HOST
# check_recoverable_range.sh wants a soft failure (ret=11), not an abort.
if [ "${SCRIPT}" = "check_recoverable_range.sh" ] ; then
export ret=11
else
exit_with_error
fi
else
echo " db_id is: $db_id"
fi
}
# Ensure $AUTH_TOKEN holds a CDM bearer token, reusing a cached one from
# ~/.rbksession.<id> when it expires more than 30 minutes from now.
# $ID may be an RSC service account ("client|<uuid>") or a CDM-style
# colon-separated id; the uuid part names the cache file.
check_get_token () {
if [ -z "${AUTH_TOKEN}" ]; then
if [[ "${ID}" =~ ^client ]]; then
# Looks like an RSC service account
id_string=$(echo $ID | cut -d\| -f 2)
else
# Not an RSC service account
id_string=$(echo $ID | cut -d: -f 4)
fi
# If there is a cached credential file, use it
if [ -f ~/.rbksession.$id_string ]; then
# Cache file format: "<expiration-timestamp> <token>"
read expiration token < <(echo $(cat ~/.rbksession.$id_string))
# If token expires within 30 min, get a new one
if [ $($DATE +%s -d $expiration) -lt $(( $($DATE +%s) + 1800 )) ]; then
get_token
else
AUTH_TOKEN=$token
fi
else
get_token
fi
fi
}
# Obtain a fresh CDM bearer token for the service account ($ID/$SECRET)
# and cache "<expirationTime> <token>" in ~/.rbksession.<id_string>.
# Sets: AUTH_TOKEN, SESSION, EXPIRATION.
get_token () {
MYENDPOINT="https://$RUBRIK_IP/api/v1/service_account/session"
MYPAYLOAD="{\"serviceAccountId\":\"$ID\",\"secret\":\"$SECRET\"}"
# BUGFIX: quote $MYPAYLOAD/$MYENDPOINT - the old unquoted -d $MYPAYLOAD
# word-split the JSON if the secret ever contained whitespace.
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X POST "$MYENDPOINT" -H "accept: application/json" -H "Content-Type: application/json" -d "$MYPAYLOAD")
check_http_error
AUTH_TOKEN=$(cat /tmp/rbkresponse.$$ | jq -r '.token')
SESSION=$(cat /tmp/rbkresponse.$$ | jq -r '.sessionId')
EXPIRATION=$(cat /tmp/rbkresponse.$$ | jq -r '.expirationTime')
echo "$EXPIRATION $AUTH_TOKEN" > ~/.rbksession.$id_string
}
# HTTP GET: Given $ENDPOINT write output to file
# Response body goes to /tmp/rbkresponse.$$; the HTTP status code is
# captured via curl -w and validated by check_http_error.
rest_api_get () {
check_get_token
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X GET $ENDPOINT -H "accept: application/json" -H "Authorization: Bearer $AUTH_TOKEN")
check_http_error
}
# HTTP POST: Given $ENDPOINT and $PAYLOAD write output to file
# NOTE(review): $PAYLOAD is passed unquoted, so it must not contain
# whitespace - current callers build compact JSON.
rest_api_post () {
check_get_token
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X POST $ENDPOINT -H "accept: application/json" -H "Authorization: Bearer $AUTH_TOKEN" -H "Content-Type: application/json" -d $PAYLOAD)
check_http_error
}
# HTTP POST to $ENDPOINT with no request body; response goes to
# /tmp/rbkresponse.$$ and the status code is validated.
rest_api_post_empty () {
check_get_token
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X POST $ENDPOINT -H "accept: application/json" -H "Authorization: Bearer $AUTH_TOKEN" -H "Content-Type: application/json")
check_http_error
}
# HTTP PATCH: Given $ENDPOINT and $PAYLOAD write output to
# /tmp/rbkresponse.$$ and validate the HTTP status code.
rest_api_patch () {
check_get_token
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X PATCH $ENDPOINT -H "accept: application/json" -H "Authorization: Bearer $AUTH_TOKEN" -H "Content-Type: application/json" -d $PAYLOAD)
check_http_error
}
# HTTP DELETE against $ENDPOINT; response goes to /tmp/rbkresponse.$$ and
# the HTTP status code is validated.
rest_api_delete () {
check_get_token
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X DELETE $ENDPOINT -H "accept: application/json" -H "Authorization: Bearer $AUTH_TOKEN")
check_http_error
}
# Fetch the connected CDM cluster's UUID (/v1/cluster/me) into $cluster_uuid.
get_cluster_uuid () {
ENDPOINT="https://$RUBRIK_IP/api/v1/cluster/me"
rest_api_get
cluster_uuid=$(cat /tmp/rbkresponse.$$ | jq -r .id)
}
# Given an ENDPOINT of an async job, monitor it: poll every 30 seconds
# until a terminal state. Honors the global $nowait flag (when 1 the job
# is left running and this function does nothing).
# Exits the script: 0 on SUCCEEDED; via exit_with_error / exit_with_cancel /
# exit_with_undoing on FAILED / CANCELED / UNDOING.
# BUGFIX: $status is now quoted - the old unquoted '[ $status != ... ]'
# produced a test-syntax error when the status was empty/null, which was
# then misinterpreted as a terminal state.
check_status () {
if [ $nowait -ne 1 ]; then
# Check the status in a loop
while true; do
rest_api_get
status=$(cat /tmp/rbkresponse.$$ | jq -r '.status')
case "$status" in
SUCCEEDED)
echo OPERATION SUCCEEDED
exit 0
;;
CANCELED)
echo OPERATION FAILED WITH STATUS "$status"
exit_with_cancel
;;
UNDOING)
echo OPERATION FAILED WITH STATUS "$status"
exit_with_undoing
;;
FAILED)
echo OPERATION FAILED WITH STATUS "$status"
exit_with_error
;;
*)
echo "Status is $status, checking in 30 seconds"
;;
esac
sleep 30
done
fi
}
# Delete every temp file these helpers create for this shell's PID.
cleanup () {
  local stem
  for stem in mountedDBs rbkresponse payload; do
    rm -f "/tmp/${stem}.$$"
  done
}

11
rbk_api.conf Executable file
View File

@@ -0,0 +1,11 @@
# IP Address (or DNS name) of Rubrik CDM
RUBRIK_IP=192.168.10.76
# NOTE(review): the commented-out token and the ID/SECRET pairs below look
# like real service-account credentials committed in plaintext. They should
# be rotated and loaded from a secrets store / untracked file instead.
#AUTH_TOKEN=eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiI0NjlmMGUzNS00OWQxLTQ2NTEtOGY1ZS00MjA5OTU0MTU3NmUiLCJpc01mYVJlbWVtYmVyVG9rZW4iOmZhbHNlLCJpc3MiOiJmZWU2MDYzNy05Y2Q4LTRjMmItOTM4NS1hNjljMjViNTA0YmUiLCJpc1JlZHVjZWRTY29wZSI6ZmFsc2UsImlhdCI6MTY3NDYzNjAzNiwianRpIjoiZGQ5MjhmYzQtODM3Mi00ZWE4LWIzMDQtNzg4MjY4MDNkYjc3In0.2-ZQQvm0TmmujVQxJEEB8w8xjhowx5GjBfdBeDkiPVw
# CDM service-account credentials used by oracle_funcs.sh (get_token)
ID="client|d332d008-a86b-4943-b5c4-b3d9720b1960"
SECRET=HsVg6hsgJwqkVsXarQEFIxjFaMeUOADGuSpYeLC2MI0TQEigK4HGvxppbLvUV_X9
# RSC (Rubrik Security Cloud) connection details used by rsc_ops.sh
RSC_HOST=rubrik-rbkpso20.my.rubrik.com
RSC_ID="client|d332d008-a86b-4943-b5c4-b3d9720b1960"
RSC_SECRET=HsVg6hsgJwqkVsXarQEFIxjFaMeUOADGuSpYeLC2MI0TQEigK4HGvxppbLvUV_X9

400
rsc_clone.sh Executable file
View File

@@ -0,0 +1,400 @@
#!/bin/bash
#
# Example RSC API call script
# v0.1 - James Pattinson - August 2025
#
# Performs a database clone operation
#
# usage: rsc_clone.sh -n <newname> -o <optionsfile> -h <targethost> [-s sourcehost] [-t "YYYY-MM-DD HH:MM:SS"] <srcdb>
#
# Options:
# -n <newname> : db_name / SID of the new cloned database
# -o <optionsfile> : Path to the options file containing advanced cloning options
# -h <targethost> : Target host where the cloned database will be created
# -s <sourcehost> : Source host where the original database is located (optional, use when there is ambiguity)
# -t "YYYY-MM-DD HH:MM:SS" : Optional timestamp for the recovery point, defaults to latest PIT
# <srcdb> : Source database name or RSC dbid (if known, can be used directly)
#
# Example options file content:
# CONTROL_FILES='/u01/app/oracle/oradata/NEWNAME/control01.ctl, /u01/app/oracle/fast_recovery_area/NEWNAME/control02.ctl'
# DB_FILE_NAME_CONVERT='OLDNAME','NEWNAME'
# DB_CREATE_FILE_DEST=/u01/app/oracle/oradata/NEWNAME/
# AUDIT_FILE_DEST='/u01/app/oracle/admin/NEWNAME/adump'
# Print usage to stderr and exit 1. BUGFIX: the inner quotes around the -t
# argument are now escaped; previously the nested double quotes terminated
# the string and the quotes were silently dropped from the output.
usage() { echo "Usage: $0 -n <newname> -o <optionsfile> -h <targethost> [-s sourcehost] [-t \"YYYY-MM-DD HH:MM:SS\"] <srcdb>" 1>&2; exit 1; }
# Resolve the script's own directory so sibling helper libraries can be
# sourced regardless of the caller's working directory.
MYDIR="$(dirname "$(realpath "$0")")"
source $MYDIR/oracle_funcs.sh
source $MYDIR/rsc_ops.sh
# Parse command-line options (see the header comment for descriptions).
while getopts "n:o:t:h:s:" o; do
case "${o}" in
n)
newName=${OPTARG}
;;
t)
datestring=${OPTARG}
;;
o)
optionsFile=${OPTARG}
;;
h)
targetHost=${OPTARG}
;;
s)
node_name=${OPTARG}
;;
*)
usage
;;
esac
done
shift $((OPTIND-1))
# Check if required options are set ($1 is the source db name/id)
if [[ -z "$1" || -z "$newName" || -z "$targetHost" || -z "$optionsFile" ]]; then
usage
fi
# Check if optionsFile exists
if [[ ! -f "$optionsFile" ]]; then
echo "ERROR: Options file '$optionsFile' does not exist."
exit_with_error
fi
# Convert a KEY=VALUE options file into a JSON array of
# { "key": ..., "value": ... } objects for the advancedRecoveryConfigMap
# GraphQL argument. Blank lines and '#' comments are skipped; CR characters
# (Windows-edited files) are stripped once, up front. Values wrapped in
# single quotes keep their quotes; otherwise surrounding quotes are removed.
# BUGFIX: trimming no longer shells out to xargs, which mangled quotes and
# failed outright on values containing an unbalanced quote or backslash.
template_to_json() {
  local input_file="${1}"
  local first=1
  local line key value value_escaped
  echo "["
  while IFS= read -r line; do
    # Ignore empty lines and lines starting with #
    [[ -z "$line" || "$line" =~ ^[[:space:]]*# ]] && continue
    key="${line%%=*}"
    value="${line#*=}"
    # Trim surrounding whitespace with pure parameter expansion
    key="${key#"${key%%[![:space:]]*}"}"
    key="${key%"${key##*[![:space:]]}"}"
    value="${value#"${value%%[![:space:]]*}"}"
    value="${value%"${value##*[![:space:]]}"}"
    if [[ $value =~ ^\'.*\'$ ]]; then
      # Keep the single quotes in the value; escape double quotes for JSON
      value_escaped=$(printf '%s' "$value" | sed 's/"/\\"/g')
    else
      # Strip surrounding double quotes (if any), drop single quotes and
      # escape any remaining double quotes for JSON
      if [[ $value =~ ^\".*\"$ ]]; then
        value="${value#\"}"
        value="${value%\"}"
      fi
      value_escaped=$(printf '%s' "$value" | sed "s/'//g" | sed 's/"/\\"/g')
    fi
    if [[ $first -eq 0 ]]; then
      echo ","
    fi
    echo -n " { \"key\": \"${key}\", \"value\": \"${value_escaped}\" }"
    first=0
  done < <(tr -d '\r' < "$input_file")
  echo
  echo "]"
}
# Query RSC for the database's recoverable ranges and export the most
# recent end-of-range as $latest_unixtime_ms (epoch milliseconds).
# Requires $dbid; exits 5 if the timestamp cannot be converted.
get_latest_pit() {
gql_getRR='query OracleDatabaseRecoverableRangesQuery($fid: String!) {
oracleRecoverableRanges(
input: {id: $fid, shouldIncludeDbSnapshotSummaries: false}
) {
data {
beginTime
endTime
__typename
}
__typename
}
oracleMissedRecoverableRanges(input: {id: $fid}) {
data {
beginTime
endTime
__typename
}
__typename
}
}'
variables="{ \"fid\": \"$dbid\" }"
gqlQuery="$(echo $gql_getRR)"
gqlVars="$(echo $variables)"
rsc_gql_query
# Get latest endTime (lexicographic sort works for ISO8601 timestamps)
latest_endtime=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleRecoverableRanges.data[] | .endTime' | sort -r | head -n 1)
echo "Latest PIT (ISO8601): $latest_endtime"
# Convert to unixtime in milliseconds
latest_unixtime_ms=$(date -d "$latest_endtime" +%s 2>/dev/null)
if [[ -z "$latest_unixtime_ms" ]]; then
# Try with gdate (macOS)
# NOTE(review): BSD 'date -d' may not fail with an empty result - confirm
# this fallback actually triggers on macOS.
latest_unixtime_ms=$(gdate -d "$latest_endtime" +%s 2>/dev/null)
fi
if [[ -z "$latest_unixtime_ms" ]]; then
echo "ERROR: Unable to convert $latest_endtime to unixtime"
exit 5
fi
latest_unixtime_ms=$((latest_unixtime_ms * 1000))
echo "Latest PIT unixtime (ms): $latest_unixtime_ms"
export latest_unixtime_ms
}
# Resolve an Oracle host (or RAC) name ($1) to its RSC id on the CDM
# cluster identified by $cdmId. Sets $targetHostId on success; aborts via
# exit_with_error unless exactly one match is found.
# BUGFIX: the error message previously claimed "Multiple hosts found" even
# when ZERO hosts matched; it now reports the actual count.
get_oracle_host_id() {
gql_list_targets='query ExampleQuery($filter: [Filter!]) {
oracleTopLevelDescendants(filter: $filter) {
nodes {
name
id
}
}
}'
variables="{
\"filter\": [
{
\"texts\": [\"$1\"],
\"field\": \"NAME\"
},
{
\"texts\": [\"$cdmId\"],
\"field\": \"CLUSTER_ID\"
}
]
}"
gqlQuery="$(echo $gql_list_targets)"
gqlVars="$(echo $variables)"
rsc_gql_query
# Get all matching host IDs (portable, no mapfile)
host_ids=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleTopLevelDescendants.nodes[] | .id')
host_count=$(echo "$host_ids" | grep -c .)
if [[ $host_count -ne 1 ]]; then
echo "ERROR: Expected exactly one host matching '$1', found $host_count:"
cat /tmp/rbkresponse.$$ | jq -r '.data.oracleTopLevelDescendants.nodes[] | "\(.name) \(.id)"'
exit_with_error
fi
# Exactly one match at this point
targetHostId=$(echo "$host_ids" | head -n 1)
}
# If $1 looks like a dbid (contains hyphens), use it directly and skip DB lookup
if [[ "$1" == *-* ]]; then
dbid="$1"
echo "INFO: Using provided dbid: $dbid"
# Look up the CDM cluster id that owns this database fid
gql_lookupCdmId='query OracleDatabase($fid: UUID!) {
oracleDatabase(fid: $fid) {
cluster {
id
}
}
}'
variables="{ \"fid\": \"$dbid\" }"
gqlQuery="$(echo $gql_lookupCdmId)"
gqlVars="$(echo $variables)"
rsc_gql_query
cdmId=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabase.cluster.id')
if [[ -z "$cdmId" ]]; then
echo "ERROR: Could not find CDM ID for dbid '$dbid'"
exit 1
fi
echo "CDM ID is $cdmId"
else
# Otherwise search for the database by exact name, excluding relics and
# replicated copies, then disambiguate by the -s source host ($node_name).
gql_DBListQuery='query OracleDatabases($filter: [Filter!]) {
oracleDatabases(filter: $filter) {
nodes {
dbUniqueName
id
cluster {
id
}
logicalPath {
fid
name
objectType
}
}
}
}'
variables="{
\"filter\": [
{
\"texts\": [\"$1\"],
\"field\": \"NAME_EXACT_MATCH\"
},
{
\"texts\": [\"false\"],
\"field\": \"IS_RELIC\"
},
{
\"texts\": [\"false\"],
\"field\": \"IS_REPLICATED\"
}
]
}"
gqlQuery="$(echo $gql_DBListQuery)"
gqlVars="$(echo $variables)"
rsc_gql_query
dbid=$(cat /tmp/rbkresponse.$$ | jq -r --arg NODE "$node_name" '.data.oracleDatabases.nodes[] | select(.logicalPath[]?.name == $NODE) | .id')
cdmId=$(cat /tmp/rbkresponse.$$ | jq -r --arg NODE "$node_name" '.data.oracleDatabases.nodes[] | select(.logicalPath[]?.name == $NODE) | .cluster.id')
dbid_count=$(echo "$dbid" | grep -c .)
if [[ "$dbid_count" -ne 1 || -z "$dbid" ]]; then
echo "ERROR: Expected exactly one database running on node '$node_name', found $dbid_count:"
cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabases.nodes[] | "\(.dbUniqueName) \(.id)"'
cleanup
exit 4
fi
echo "DEBUG: DB ID is $dbid"
fi
# Only run UTC conversion if -t was used
if [[ -n "${datestring:-}" ]]; then
# NOTE(review): no -u flag is passed to date, so this formats local time,
# not UTC, despite the messages - confirm intent.
utctime=$($DATE -d"$datestring" +"%Y-%m-%d %H:%M:%S")
if [ $? -ne 0 ]; then
echo ERROR: Unable to convert supplied timestamp to UTC time
exit_with_error
fi
unixtime=$($DATE -d"$datestring" +%s)
unixtime_ms=$((unixtime * 1000))
echo INFO: Requested time is $datestring which is $utctime in UTC, unixtime is $unixtime
else
echo INFO: No time specified, using latest PIT
get_latest_pit
unixtime_ms=$latest_unixtime_ms
fi
# Call the function and capture the output (sets $targetHostId)
get_oracle_host_id "$targetHost"
if [[ -z "$targetHostId" ]]; then
echo "ERROR: Could not resolve target host ID for '$targetHost'"
exit_with_error
fi
echo Target Host ID is $targetHostId
# Render the advanced recovery options file into the JSON key/value array
# expected by advancedRecoveryConfigMap.
cloningOptions=$(template_to_json $optionsFile)
variables="
{
\"input\": {
\"request\": {
\"id\": \"$dbid\",
\"config\": {
\"targetOracleHostOrRacId\": \"$targetHostId\",
\"shouldRestoreFilesOnly\": false,
\"recoveryPoint\": {
\"timestampMs\": $unixtime_ms
},
\"cloneDbName\": \"$newName\",
\"shouldAllowRenameToSource\": true,
\"shouldSkipDropDbInUndo\": false
}
},
\"advancedRecoveryConfigMap\": $cloningOptions
}
}"
gqlClone='mutation OracleDatabaseExportMutation($input: ExportOracleDatabaseInput!) {
exportOracleDatabase(input: $input) {
id
links {
href
rel
__typename
}
__typename
}
}'
gqlQuery="$(echo $gqlClone)"
gqlVars="$(echo $variables)"
# Fire the clone (export) mutation and show the raw response
rsc_gql_query
cat /tmp/rbkresponse.$$ | jq
# Save the id from the response
job_id=$(cat /tmp/rbkresponse.$$ | jq -r '.data.exportOracleDatabase.id')
echo "DEBUG: Job id is $job_id"
# Query used to poll the async clone job on the CDM cluster
gqlCheckStatus='query OracleDatabaseAsyncRequestDetails($input: GetOracleAsyncRequestStatusInput!) {
oracleDatabaseAsyncRequestDetails(input: $input) {
id
nodeId
status
startTime
endTime
progress
error {
message
}
}
}'
variables="{
\"input\": {
\"id\": \"$job_id\",
\"clusterUuid\": \"$cdmId\"
}
}"
gqlQuery="$(echo $gqlCheckStatus)"
gqlVars="$(echo $variables)"
# Poll every 15 seconds until the job reaches a terminal state.
# BUGFIX: removed the duplicated, unreachable tail that previously followed
# this loop (stray echo/cleanup/exit plus an unmatched fi/done) - it was a
# hard bash syntax error that prevented the script from parsing at all.
while true; do
  rsc_gql_query
  status=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabaseAsyncRequestDetails.status')
  progress=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabaseAsyncRequestDetails.progress')
  echo "Job status: $status $progress percent"
  if [[ "$status" == "FAILED" ]]; then
    echo "Database clone FAILED"
    cat /tmp/rbkresponse.$$ | jq
    cleanup
    exit 2
  elif [[ "$status" == "CANCELLED" ]]; then
    echo "Database clone CANCELLED"
    # BUGFIX: also remove temp files on cancellation (previously leaked)
    cleanup
    exit 3
  elif [[ "$status" == "SUCCEEDED" ]]; then
    echo "Database clone SUCCEEDED"
    cat /tmp/rbkresponse.$$ | jq
    cleanup
    exit 0
  fi
  sleep 15
done

54
rsc_list_oracle_slas.sh Executable file
View File

@@ -0,0 +1,54 @@
#!/bin/bash
#
# Example RSC API call script
# v0.1 - James Pattinson - June 2024
#
# Lists Global SLAs from RSC which support Oracle
#
# usage: rsc_list_slas.sh [filter]
usage() { echo "Usage: $0 [filter]" 1>&2; exit 1; }
# Resolve the script's own directory so helper libraries can be sourced
# regardless of the caller's working directory.
MYDIR="$(dirname "$(realpath "$0")")"
# source $MYDIR/rbk_api.conf
source $MYDIR/oracle_funcs.sh
source $MYDIR/rsc_ops.sh
gql_SLAListQuery='query SLAListQuery($after: String, $first: Int, $filter: [GlobalSlaFilterInput!], $sortBy: SlaQuerySortByField, $sortOrder: SortOrder) {
slaDomains(
after: $after
first: $first
filter: $filter
sortBy: $sortBy
sortOrder: $sortOrder
) {
edges {
node {
name
... on GlobalSlaReply {
id
objectTypes
__typename
}
}
}
}
}'
# Optional name substring to filter SLAs by (first positional argument)
filter=$1
variables="{
\"shouldShowPausedClusters\":true,
\"filter\":[{\"field\":\"NAME\",\"text\":\"$filter\"},{\"field\":\"OBJECT_TYPE\",\"objectTypeList\":[\"ORACLE_OBJECT_TYPE\"]}],
\"sortBy\":\"NAME\",
\"sortOrder\":\"ASC\",
\"first\":50
}"
gqlQuery="$(echo $gql_SLAListQuery)"
gqlVars="$(echo $variables)"
rsc_gql_query
# Print just the SLA names, one per line
cat /tmp/rbkresponse.$$ | jq -r '.data.slaDomains.edges[] | .node.name'
cleanup

134
rsc_log_backup.sh Executable file
View File

@@ -0,0 +1,134 @@
#!/bin/bash
#
# Example RSC API call script
# v0.1 - James Pattinson - June 2024
#
# Performs a log backup for an Oracle database
#
# usage: rsc_log_backup.sh [filter]
usage() { echo "Usage: $0 [filter]" 1>&2; exit 1; }
# The database name ($1) is mandatory
if [[ -z "$1" ]]; then
usage
fi
# Resolve the script's own directory so helper libraries can be sourced
# regardless of the caller's working directory.
MYDIR="$(dirname "$(realpath "$0")")"
# source $MYDIR/rbk_api.conf
source $MYDIR/oracle_funcs.sh
source $MYDIR/rsc_ops.sh
gql_DBListQuery='query OracleDatabases($filter: [Filter!]) {
oracleDatabases(filter: $filter) {
nodes {
dbUniqueName
id
cluster {
id
}
}
}
}'
# Exact-name match, excluding relics and replicated copies
variables="{
\"filter\": [
{
\"texts\": [\"$1\"],
\"field\": \"NAME_EXACT_MATCH\"
},
{
\"texts\": [\"false\"],
\"field\": \"IS_RELIC\"
},
{
\"texts\": [\"false\"],
\"field\": \"IS_REPLICATED\"
}
]
}"
gqlQuery="$(echo $gql_DBListQuery)"
gqlVars="$(echo $variables)"
rsc_gql_query
dbid=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabases.nodes[] | .id')
cdmId=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabases.nodes[] | .cluster.id')
# Check for multiple dbids
dbid_count=$(echo "$dbid" | wc -l)
if [[ "$dbid_count" -ne 1 || -z "$dbid" ]]; then
echo "ERROR: Expected exactly one database match! found:"
cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabases.nodes[] | .dbUniqueName'
cleanup
exit 4
fi
echo "DEBUG: DB ID is $dbid"
# Payload for the on-demand log snapshot mutation
variables="{
\"input\": {
\"id\": \"$dbid\"
}
}"
gqlLogBackup='mutation TakeLogBackupOracleMutation($input: TakeOnDemandOracleLogSnapshotInput!) {
takeOnDemandOracleLogSnapshot(input: $input) {
id
__typename
}
}'
gqlQuery="$(echo $gqlLogBackup)"
gqlVars="$(echo $variables)"
rsc_gql_query
# Save the id from the response
log_backup_id=$(cat /tmp/rbkresponse.$$ | jq -r '.data.takeOnDemandOracleLogSnapshot.id')
echo "DEBUG: Job id is $log_backup_id"
# Query used to poll the async job on the CDM cluster
gqlCheckStatus='query OracleDatabaseAsyncRequestDetails($input: GetOracleAsyncRequestStatusInput!) {
oracleDatabaseAsyncRequestDetails(input: $input) {
id
nodeId
status
startTime
endTime
progress
error {
message
}
}
}'
variables="{
\"input\": {
\"id\": \"$log_backup_id\",
\"clusterUuid\": \"$cdmId\"
}
}"
gqlQuery="$(echo $gqlCheckStatus)"
gqlVars="$(echo $variables)"
# Poll the log backup job every 15 seconds until it reaches a terminal state.
# Exits: 0 SUCCEEDED, 2 FAILED, 3 CANCELLED.
while true; do
  rsc_gql_query
  status=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabaseAsyncRequestDetails.status')
  progress=$(cat /tmp/rbkresponse.$$ | jq -r '.data.oracleDatabaseAsyncRequestDetails.progress')
  echo "Job status: $status $progress percent"
  if [[ "$status" == "FAILED" ]]; then
    echo "Log backup FAILED"
    cat /tmp/rbkresponse.$$ | jq
    cleanup
    exit 2
  elif [[ "$status" == "CANCELLED" ]]; then
    echo "Log backup CANCELLED"
    # BUGFIX: remove temp files on cancellation too (previously leaked)
    cleanup
    exit 3
  elif [[ "$status" == "SUCCEEDED" ]]; then
    echo "Log backup SUCCEEDED"
    cat /tmp/rbkresponse.$$ | jq
    cleanup
    exit 0
  fi
  sleep 15
done

323
rsc_ops.sh Executable file
View File

@@ -0,0 +1,323 @@
#!/bin/bash
#
#--------------------------------------------------------------------------------------------------------
# RSC shell script support functions
# v0.2 - James Pattinson - August 2021
# v0.3 - U701053 - 30.03.2022 - if database not found, try to search with the ENDPOINT dataguard
# v0.4 - James Pattinson 25/01/23 - Adding support for Service Accounts
#--------------------------------------------------------------------------------------------------------
# Resolve the directory this script lives in (config is expected to have been
# sourced already by oracle_funcs.sh).
MYDIR="$(dirname "$(realpath "$0")")"
#source $MYDIR/rbk_api.conf
# Ensure $RSC_AUTH_TOKEN holds a valid RSC bearer token, reusing a cached
# one from ~/.rbkRscsession.<id> when it has more than 30 minutes left.
# Only RSC service accounts ("client|<uuid>" in $ID) are supported.
# BUGFIX: the cached expiration is now validated as numeric - previously a
# corrupt/empty cache file made the unquoted '[ $expiration -lt ... ]' test
# error out and the stale token was reused.
check_get_rsc_token () {
if [ -z "${RSC_AUTH_TOKEN}" ]; then
if [[ "${ID}" =~ ^client ]]; then
# Looks like an RSC service account
id_string=$(echo $ID | cut -d\| -f 2)
else
# Not an RSC service account - these helpers only support service accounts
exit_with_error
fi
# If there is a cached credential file, use it
# Cache file format: "<expiration-epoch-seconds> <token>"
if [ -f ~/.rbkRscsession.$id_string ]; then
read expiration token < <(echo $(cat ~/.rbkRscsession.$id_string))
# Refresh if the cache is corrupt or the token expires within 30 min
if ! [[ "$expiration" =~ ^[0-9]+$ ]] || [ "$expiration" -lt $(( $($DATE +%s) + 1800 )) ]; then
get_rsc_token
else
RSC_AUTH_TOKEN=$token
fi
else
get_rsc_token
fi
fi
}
# Obtain a fresh RSC bearer token via the client_token endpoint and cache
# "<expiration-epoch-seconds> <token>" in ~/.rbkRscsession.<id_string>.
# Sets: RSC_AUTH_TOKEN, EXPIRATION.
get_rsc_token () {
MYENDPOINT="https://${RSC_HOST}/api/client_token"
MYPAYLOAD="{\"client_id\":\"$ID\",\"client_secret\":\"$SECRET\"}"
# BUGFIX: payload/endpoint are quoted so a secret containing whitespace
# cannot word-split the JSON.
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X POST "$MYENDPOINT" -H "accept: application/json" -H "Content-Type: application/json" -d "$MYPAYLOAD")
check_http_error
RSC_AUTH_TOKEN=$(cat /tmp/rbkresponse.$$ | jq -r '.access_token')
# BUGFIX: do not store this in SECONDS - that is a special bash variable
# (seconds since shell start); assigning to it resets the running timer and
# its value keeps ticking, skewing the computed expiration.
token_ttl=$(cat /tmp/rbkresponse.$$ | jq -r '.expires_in')
EXPIRATION=$($DATE +%s -d "+${token_ttl} seconds")
#cat /tmp/rbkresponse.$$ | jq
echo "$EXPIRATION $RSC_AUTH_TOKEN" > ~/.rbkRscsession.$id_string
}
# HTTP GET against $ENDPOINT using the RSC bearer token; response body goes
# to /tmp/rbkresponse.$$ and the HTTP status code is validated.
rsc_api_get () {
check_get_rsc_token
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X GET $ENDPOINT -H "accept: application/json" -H "Authorization: Bearer $RSC_AUTH_TOKEN")
check_http_error
}
# HTTP POST of $PAYLOAD to $ENDPOINT using the RSC bearer token; response
# goes to /tmp/rbkresponse.$$ and the HTTP status code is validated.
# NOTE(review): $PAYLOAD is passed unquoted, so it must not contain spaces.
rsc_api_post () {
check_get_rsc_token
http_response=$(curl -s -k -o /tmp/rbkresponse.$$ -w "%{http_code}" -X POST $ENDPOINT -H "accept: application/json" -H "Authorization: Bearer $RSC_AUTH_TOKEN" -H "Content-Type: application/json" -d $PAYLOAD)
check_http_error
}
# Legacy GraphQL POST helper: sends {"query","variables"} via a here-doc.
# Unlike rsc_gql_query it does not capture the HTTP status code or check
# the response for errors. Kept for reference; not used by current callers.
rsc_gql_query_old () {
check_get_rsc_token
ENDPOINT="https://${RSC_HOST}/api/graphql"
#curl -s -o /tmp/rbkresponse.$$ -w "%{http_code}" -X POST $ENDPOINT \
curl -s -o /tmp/rbkresponse.$$ -X POST $ENDPOINT \
-H "Authorization: Bearer ${RSC_AUTH_TOKEN}" \
-H 'Content-Type: application/json' \
-d @- <<EOF
{"query": "$gqlQuery", "variables": $gqlVars}
EOF
}
# POST a GraphQL request to RSC: $gqlQuery (whitespace-flattened query) and
# $gqlVars (JSON variables). The payload is written to /tmp/payload.$$ and
# the response to /tmp/rbkresponse.$$. Aborts the script on HTTP-level or
# GraphQL-level errors.
# BUGFIX: check_http_error now runs BEFORE the jq error probe, so a non-JSON
# HTTP error body (e.g. an HTML 401/5xx page) is reported cleanly instead of
# first making jq fail on unparseable input.
rsc_gql_query () {
check_get_rsc_token
ENDPOINT="https://${RSC_HOST}/api/graphql"
cat - <<EOF > /tmp/payload.$$
{"query": "$gqlQuery", "variables": $gqlVars}
EOF
http_response=$(curl -s -o /tmp/rbkresponse.$$ -w "%{http_code}" -X POST $ENDPOINT \
  -H "Authorization: Bearer ${RSC_AUTH_TOKEN}" \
  -H 'Content-Type: application/json' \
  -d @/tmp/payload.$$)
#cat /tmp/payload.$$ | jq -r
check_http_error
# A 2xx GraphQL response can still carry an "errors" array - treat as fatal
error=$(cat /tmp/rbkresponse.$$ | jq -r '.errors // empty')
if [ "$error" ]; then
echo "ERROR: The last GraphQL API call returned an error"
echo
echo "PAYLOAD:"
cat /tmp/payload.$$ | jq -r
echo "RESPONSE:"
cat /tmp/rbkresponse.$$ | jq -r '.errors'
exit_with_error
fi
}
# Find the best-matching Oracle database in RSC for $RBK_SID on the CDM
# cluster $cluster_uuid (resolved via get_cluster_uuid when unset).
# Prefers a single Data Guard group match; otherwise falls back to a
# standalone database, optionally disambiguated by $RBK_HOST.
# Sets: name, rsc_db_id, database_type. Aborts on zero/ambiguous matches.
rsc_find_database () {
# if cluster_uuid is not set, get it
# gql query to find DB name, return the best one (based on CDM UUID etc)
#
if [ -z "${cluster_uuid}" ]; then
#echo cluster UUID not, set, getting it
get_cluster_uuid
#echo Cluster UUID is $cluster_uuid
else
echo Cluster UUID was already $cluster_uuid
fi
variables="{
\"filter\":[{\"field\":\"REGEX\",\"texts\":[\"$RBK_SID\"]},{\"field\":\"IS_GHOST\",\"texts\":[\"false\"]},{\"field\":\"IS_ACTIVE\",\"texts\":[\"true\"]},{\"field\":\"CLUSTER_ID\",\"texts\":[\"$cluster_uuid\"]}],
\"sortBy\":\"NAME\",
\"sortOrder\":\"ASC\",
\"first\":500
}"
gql_GlobalSearchOracle='query GlobalSearchObjectQuery($first: Int!, $filter: [Filter!]!, $sortBy: HierarchySortByField, $sortOrder: SortOrder, $after: String) {
globalSearchResults(
first: $first
filter: $filter
sortBy: $sortBy
sortOrder: $sortOrder
after: $after
) {
edges {
cursor
node {
id
name
objectType
logicalPath {
fid
name
objectType
__typename
}
physicalPath {
fid
name
objectType
__typename
}
... on HierarchyObject {
...EffectiveSlaColumnFragment
__typename
}
... on OracleDatabase {
cluster {
...ClusterFragment
__typename
}
primaryClusterLocation {
id
__typename
}
isRelic
__typename
}
... on OracleRac {
cluster {
...ClusterFragment
__typename
}
primaryClusterLocation {
id
__typename
}
__typename
}
... on OracleDataGuardGroup {
cluster {
...ClusterFragment
__typename
}
primaryClusterLocation {
id
__typename
}
isRelic
__typename
}
__typename
}
__typename
}
pageInfo {
endCursor
startCursor
hasNextPage
hasPreviousPage
__typename
}
__typename
}
}
fragment ClusterFragment on Cluster {
id
name
__typename
}
fragment EffectiveSlaColumnFragment on HierarchyObject {
id
effectiveSlaDomain {
...EffectiveSlaDomainFragment
... on GlobalSlaReply {
description
__typename
}
__typename
}
... on CdmHierarchyObject {
pendingSla {
...SLADomainFragment
__typename
}
__typename
}
__typename
}
fragment EffectiveSlaDomainFragment on SlaDomain {
id
name
... on GlobalSlaReply {
isRetentionLockedSla
retentionLockMode
__typename
}
... on ClusterSlaDomain {
fid
cluster {
id
name
__typename
}
isRetentionLockedSla
retentionLockMode
__typename
}
__typename
}
fragment SLADomainFragment on SlaDomain {
id
name
... on ClusterSlaDomain {
fid
cluster {
id
name
__typename
}
__typename
}
__typename
}'
gqlQuery="$(echo $gql_GlobalSearchOracle)"
gqlVars="$(echo $variables)"
rsc_gql_query
#cat /tmp/rbkresponse.$$ | jq -r
# First preference: a single non-relic Data Guard group match.
# NOTE(review): DG groups are matched with the enum-style objectType
# "ORACLE_DATA_GUARD_GROUP" while standalone DBs below use "OracleDatabase"
# - confirm both spellings against the actual search results.
num=$(cat /tmp/rbkresponse.$$ | jq -r '[.data.globalSearchResults.edges[] | select (.node.objectType=="ORACLE_DATA_GUARD_GROUP" and .node.isRelic==false)]| length')
if [ $num -eq 1 ]; then
#echo Good, There is just one DG with name $RBK_SID
read name rsc_db_id < <(echo $(cat /tmp/rbkresponse.$$ | jq -r '.data.globalSearchResults.edges[] | select (.node.objectType=="ORACLE_DATA_GUARD_GROUP" and .node.isRelic==false)| .node.name, .node.id'))
database_type="Data Guard"
elif [ $num -gt 1 ]; then
echo "ERROR: There were $num entries returned for Data Guard databases with name $RBK_SID"
exit_with_error
fi
# Fallback: standalone database, disambiguated by $RBK_HOST when provided.
if [ -z "$rsc_db_id" ]; then
#echo INFO: No Data Guard DB found with SID $RBK_SID. Looking for standalone DBs
if [ -z "$RBK_HOST" ]; then
num=$(cat /tmp/rbkresponse.$$ | jq -r '[.data.globalSearchResults.edges[] | select (.node.objectType=="OracleDatabase" and .node.isRelic==false)] | length')
if [ $num -eq 1 ]; then
read name rsc_db_id < <(echo $(cat /tmp/rbkresponse.$$ | jq -r '.data.globalSearchResults.edges[] | select (.node.objectType=="OracleDatabase" and .node.isRelic==false)| .node.name, .node.id'))
database_type="Standalone"
#echo Good, There is just one Standalone DB with name $name and RSC ID $rsc_db_id
else
echo "ERROR: There were $num entries returned from JQ for DB with name $RBK_SID on host $RBK_HOST"
exit_with_error
fi
else
num=$(cat /tmp/rbkresponse.$$ | jq -r --arg HOST "$RBK_HOST" '[.data.globalSearchResults.edges[] | select (.node.logicalPath[0].name==$HOST and .node.objectType=="OracleDatabase" and .node.isRelic==false)] | length')
if [ $num -eq 1 ]; then
read name rsc_db_id < <(echo $(cat /tmp/rbkresponse.$$ | jq -r --arg HOST "$RBK_HOST" '.data.globalSearchResults.edges[] | select (.node.logicalPath[0].name==$HOST and .node.objectType=="OracleDatabase" and .node.isRelic==false)| .node.name, .node.id'))
database_type="Standalone"
#echo Good, There is just one Standalone DB with name $name on ${RBK_HOST} and RSC ID $rsc_db_id
else
echo "ERROR: There were $num entries returned from for DB with name $RBK_SID on host $RBK_HOST"
exit_with_error
fi
fi
fi
}