#!/usr/bin/env bash
# Interactively enables S3 analytics (inventory reports, server access logs,
# Storage Lens) on the user's buckets and optionally shares them with Granica.
# Scratch directory for intermediate output; removed at the end of the run.
WORKING_DIR="./.granica"
TIMESTAMP_STR=$(date +"%Y_%m_%d_%H_%M_%S")
# Local path and S3 object key for this run's log file.
SCRIPT_OUTFILE="${WORKING_DIR}/script_run_${TIMESTAMP_STR}.log"
SCRIPT_OUTKEY="script_runs/script_run_${TIMESTAMP_STR}.log"
ANALYTICS_BUCKET_NAME=""
# All created analytics buckets are named "<prefix>-<region>-<random salt>".
ANALYTICS_BUCKET_PREFIX="granica-analytics"
# Parallel arrays caching one analytics bucket per region: names and regions.
ANALYTICS_BUCKETS_NAMES=()
ANALYTICS_BUCKETS_LOCATIONS=()
function read_yes_no() {
  # Prompt for a yes/no answer, defaulting to "y" on empty input, and
  # re-prompt until one of y/Y/n/N is entered.
  #   $1 - prompt for the first question
  #   $2 - prompt used when re-asking after invalid input
  # Result (a single y/Y/n/N character) is returned in the global
  # read_yes_no_rvalue.
  local init_prompt="${1} [y/n][default=y]: "
  local repeating_prompt="${2} [y/n][default=y]: "
  # Fix: 'response' is now local (it used to leak into the global scope) and
  # reads use -r so backslashes in input are not interpreted.
  local response
  read -r -p "$init_prompt" response
  response=${response:-y}
  while [[ "$response" != [yYnN] ]]; do
    echo "Invalid response. Expected [y/n]"
    read -r -p "$repeating_prompt" response
    response=${response:-y}
  done
  read_yes_no_rvalue=${response}
}
function get_region() {
  # Determine the AWS region to use for a CLI profile. If the profile has a
  # configured region, ask the user to confirm it; otherwise prompt for one
  # (default us-west-2). Result is returned in the global get_region_rvalue.
  #   $1 - AWS CLI profile name
  local profile_name=$1
  # NOTE: profile_region is intentionally not local; callers only rely on
  # get_region_rvalue, but we avoid changing legacy globals conservatively.
  profile_region=$(aws configure get region --profile "$profile_name")
  if [[ -z "$profile_region" ]]; then
    echo "No AWS region associated with profile ${profile_name}"
    read -r -p "Enter AWS region name [default = us-west-2]: " profile_region
    profile_region=${profile_region:-"us-west-2"}
  else
    echo "Profile $profile_name is associated with region $profile_region"
    local prompt="Use region [$profile_region]"
    read_yes_no "$prompt" "$prompt"
    if [[ "$read_yes_no_rvalue" == [nN] ]]; then
      read -r -p "Enter AWS region name: " profile_region
    fi
  fi
  get_region_rvalue=$profile_region
}
function get_bucket_region() {
  # Resolve the region a bucket lives in via get-bucket-location.
  #   $1 - AWS CLI profile name
  #   $2 - bucket name
  # Result is returned in the global get_bucket_region_rvalue.
  local profile_name=$1
  local bucket_name=$2
  # NOTE: 'location' is intentionally left global to preserve legacy
  # behavior (other parts of the script historically read it).
  location=$(aws s3api get-bucket-location --profile "$profile_name" --bucket "$bucket_name" \
    --query LocationConstraint --output text)
  # Buckets in us-east-1 report no LocationConstraint — the CLI prints
  # "None" (or nothing). Both cases map to us-east-1. The original tested
  # [ -z $location ] unquoted, which breaks on values with whitespace.
  if [[ -z "$location" || "$location" == "None" ]]; then
    get_bucket_region_rvalue="us-east-1"
  else
    get_bucket_region_rvalue=$location
  fi
}
function bucket_exists() {
  # Probe a bucket with head-bucket. Sets the global bucket_exists_rvalue to
  # 1 when the bucket is reachable under the given profile, 0 otherwise.
  #   $1 - AWS CLI profile name
  #   $2 - bucket name
  local aws_profile=$1
  local target_bucket=$2
  if aws s3api head-bucket --profile "${aws_profile}" --bucket "${target_bucket}" > /dev/null 2>&1; then
    bucket_exists_rvalue=1
  else
    bucket_exists_rvalue=0
  fi
}
function get_object_count() {
  # Look up a bucket's approximate object count from the CloudWatch AWS/S3
  # NumberOfObjects metric, using a window ending two days ago (the metric
  # lags real time). Result is returned in the global
  # get_object_count_rvalue; -1 means the metric was unavailable.
  #   $1 - AWS CLI profile name
  #   $2 - bucket name
  local profile_name=$1
  local bucket_name=$2
  local two_day_ago four_day_ago query_out metric_float metric_value
  # Portability fix: 'date -v-2d' is BSD/macOS-only and fails on GNU/Linux.
  # Try the BSD form first, then fall back to the GNU '-d' form.
  two_day_ago=$(date -v-2d -Idate 2>/dev/null || date -d "2 days ago" -Idate)
  four_day_ago=$(date -v-4d -Idate 2>/dev/null || date -d "4 days ago" -Idate)
  query_out=$(aws cloudwatch --profile "$profile_name" get-metric-statistics --namespace AWS/S3 --metric-name NumberOfObjects \
    --dimensions Name=BucketName,Value="$bucket_name" Name=StorageType,Value=AllStorageTypes \
    --start-time "$four_day_ago" --end-time "$two_day_ago" --statistics Average --unit Count \
    --period 86400 --output text | grep "DATAPOINTS")
  if [ -z "${query_out}" ]; then
    # No datapoints (or the query failed): signal "unknown" with -1.
    metric_value=-1
  else
    # Text output line is "DATAPOINTS <average> <timestamp> <unit>"; take
    # the average and truncate its fractional part.
    local -a query_array
    read -ra query_array <<< "$query_out"
    metric_float=${query_array[1]}
    metric_value=${metric_float%.*}
  fi
  get_object_count_rvalue=$metric_value
}
function sync_script_output() {
  # Upload the local run log (SCRIPT_OUTFILE) into the analytics bucket at
  # key SCRIPT_OUTKEY. Failures are deliberately silenced: log upload is
  # best-effort and must not interrupt the main flow.
  #   $1 - AWS CLI profile name
  #   $2 - analytics bucket name
  local aws_profile=$1
  local dest_bucket=$2
  aws s3api put-object \
    --profile "${aws_profile}" \
    --bucket "${dest_bucket}" \
    --key "${SCRIPT_OUTKEY}" \
    --body "${SCRIPT_OUTFILE}" > /dev/null 2>&1
}
function gen_salt() {
  # Generate a 5-character random suffix of lowercase letters and digits,
  # used to make analytics bucket names globally unique.
  # Result is returned in the global gen_salt_rvalue.
  # Fix: chars/result/i/char are now local; they used to leak into the
  # global scope on every call.
  local chars=abcdefghijklmnopqrstuvwxyz0123456789
  local result=""
  local i char
  for i in {1..5} ; do
    char="${chars:RANDOM%${#chars}:1}"
    result="${result}${char}"
  done
  gen_salt_rvalue=$result
}
function get_analytics_bucket_name() {
  # Return (in the global get_analytics_bucket_name_rvalue) the analytics
  # bucket for the given region, creating and configuring one on first use.
  # One analytics bucket is maintained per region, cached in the parallel
  # arrays ANALYTICS_BUCKETS_NAMES / ANALYTICS_BUCKETS_LOCATIONS.
  #   $1 - AWS CLI profile name
  #   $2 - region of the source bucket
  local profile_name=$1
  local bucket_region=$2
  local i num_buckets analytics_bucket_name
  num_buckets=${#ANALYTICS_BUCKETS_LOCATIONS[@]}
  for (( i=0; i<num_buckets; i++ )); do
    # BUG FIX: this used to compare the stale global $location (set by
    # get_bucket_region) instead of the bucket_region argument, so the
    # cache never hit and a new analytics bucket was created on every call.
    if [ "$bucket_region" == "${ANALYTICS_BUCKETS_LOCATIONS[$i]}" ]; then
      get_analytics_bucket_name_rvalue="${ANALYTICS_BUCKETS_NAMES[$i]}"
      return
    fi
  done
  # No analytics bucket for this region yet — create one with a random
  # suffix so the name is globally unique.
  gen_salt
  analytics_bucket_name="${ANALYTICS_BUCKET_PREFIX}-${bucket_region}-${gen_salt_rvalue}"
  echo "Creating analytics bucket for storing analytics: $analytics_bucket_name"
  echo "Note: This bucket will store analytics on your buckets."
  # us-east-1 buckets must be created WITHOUT a LocationConstraint.
  if [ "$bucket_region" == "us-east-1" ]; then
    aws s3api create-bucket --bucket "$analytics_bucket_name" --region "$bucket_region" --profile "$profile_name"
  else
    aws s3api create-bucket --bucket "$analytics_bucket_name" --region "$bucket_region" \
      --create-bucket-configuration LocationConstraint="$bucket_region" --profile "$profile_name"
  fi
  get_analytics_bucket_name_rvalue=$analytics_bucket_name
  # The bucket policy always lets S3 deliver inventory reports and access
  # logs. When sharing is enabled it additionally grants Granica's
  # onboarding principals read-only access to the analytics.
  if [ "$SHARE_ANALYTICS" = true ] ; then
    ANALYTICS_BUCKET_POLICY=$(cat <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowS3ToWriteInventoryReport",
"Effect": "Allow",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Action": "s3:PutObject",
"Resource": [
"arn:aws:s3:::${analytics_bucket_name}/inventory/*"
],
"Condition": {
"StringEquals": {
"aws:SourceAccount": "${SOURCE_ACCOUNT_ID}",
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
},
{
"Sid": "AllowS3ToWriteAccessLogs",
"Effect": "Allow",
"Principal": {
"Service": "logging.s3.amazonaws.com"
},
"Action": [
"s3:PutObject"
],
"Resource": "arn:aws:s3:::${analytics_bucket_name}/*"
},
{
"Sid": "AllowGranicaToReadAnalytics",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::401252763139:role/GRANICA_ONBOARDING",
"arn:aws:iam::401252763139:user/granica_onboarding"
]
},
"Action": [
"s3:Get*",
"s3:List*"
],
"Resource": [
"arn:aws:s3:::${analytics_bucket_name}",
"arn:aws:s3:::${analytics_bucket_name}/*"
]
}
]
}
EOF
)
  else
    ANALYTICS_BUCKET_POLICY=$(cat <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowS3ToWriteInventoryReport",
"Effect": "Allow",
"Principal": {
"Service": "s3.amazonaws.com"
},
"Action": "s3:PutObject",
"Resource": [
"arn:aws:s3:::${analytics_bucket_name}/inventory/*"
],
"Condition": {
"StringEquals": {
"aws:SourceAccount": "${SOURCE_ACCOUNT_ID}",
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
},
{
"Sid": "AllowS3ToWriteAccessLogs",
"Effect": "Allow",
"Principal": {
"Service": "logging.s3.amazonaws.com"
},
"Action": [
"s3:PutObject"
],
"Resource": "arn:aws:s3:::${analytics_bucket_name}/*"
}
]
}
EOF
)
  fi
  # Expire analytics objects after 7 days to bound storage cost.
  ANALYTICS_BUCKET_LIFECYCLE_POLICY=$(cat <<EOF
{
"Rules": [
{
"ID": "Expire after 7 days",
"Status": "Enabled",
"Prefix": "",
"Expiration": {
"Days": 7
}
}
]
}
EOF
)
  echo "Applying analytics bucket policy. "
  # Fix: the original applied put-bucket-policy a second time after logging
  # (a copy-paste duplicate); a single application is sufficient. Also fixed
  # a stray '}}' in the lifecycle log message below.
  aws s3api put-bucket-policy --profile "$profile_name" --bucket "$analytics_bucket_name" --policy "${ANALYTICS_BUCKET_POLICY}"
  aws s3api put-bucket-lifecycle-configuration --profile "$profile_name" --bucket "$analytics_bucket_name" --lifecycle-configuration "${ANALYTICS_BUCKET_LIFECYCLE_POLICY}"
  echo "Applied analytics bucket policy: [${ANALYTICS_BUCKET_POLICY}] " >> "$SCRIPT_OUTFILE"
  echo "Applied analytics bucket lifecycle policy: [${ANALYTICS_BUCKET_LIFECYCLE_POLICY}] " >> "$SCRIPT_OUTFILE"
  # Record the new bucket in the per-region cache.
  ANALYTICS_BUCKETS_NAMES+=("${analytics_bucket_name}")
  ANALYTICS_BUCKETS_LOCATIONS+=("${bucket_region}")
}
function write_bucket_properties() {
  # Capture a set of bucket properties (ACL, encryption, lifecycle, etc.)
  # for a source bucket and upload each one to the analytics bucket under
  # bucket_properties/<src_bucket>/<property>. Each command's output is also
  # appended to the run log.
  #   $1 - AWS CLI profile name
  #   $2 - source bucket to inspect
  #   $3 - analytics bucket to upload results to
  # Fix: the original built every path with a broken $(unknown) command
  # substitution (it executed a nonexistent command and produced empty
  # names) while the intended ${filename} value was assigned but unused.
  # The eight copy-pasted stanzas are collapsed into one data-driven loop.
  local profile=$1
  local src_bucket=$2
  local analytics_bucket=$3
  local subcmd filename cmd_out output_filepath object_key i
  # Flat list of (s3api subcommand, output file name) pairs.
  local -a props=(
    get-bucket-acl acl
    get-bucket-encryption encryption
    list-bucket-intelligent-tiering-configurations intelligent_tiering_configurations
    get-bucket-location location
    get-bucket-lifecycle-configuration lifecycle
    get-bucket-policy-status public_access
    get-bucket-request-payment request_payment
    get-bucket-versioning versioning
  )
  for (( i=0; i<${#props[@]}; i+=2 )); do
    subcmd="${props[$i]}"
    filename="${props[$((i+1))]}"
    cmd_out=$(aws s3api "$subcmd" --bucket "$src_bucket" --profile "$profile" 2>&1)
    output_filepath="${WORKING_DIR}/${filename}"
    object_key="bucket_properties/${src_bucket}/${filename}"
    printf '%s\n' "$cmd_out" > "$output_filepath"
    # Best-effort upload; failures are intentionally silenced.
    aws s3api put-object --bucket "$analytics_bucket" --key "$object_key" --body "$output_filepath" --profile "$profile" > /dev/null 2>&1
    echo "$subcmd for bucket [${src_bucket}], output: [${cmd_out}]" >> "$SCRIPT_OUTFILE"
  done
}
# --- Interactive setup: sharing mode, profile, region, account ---------------
echo "This script will guide you through the process of sharing analytics about your S3 usage with Granica. "
echo "These analytics include S3 Storage Lens report, S3 inventory reports, S3 server access logs, and properties of buckets being analyzed. "
echo "If this script is rerun, all selections must be re-entered. "
echo "Note: this script requires aws CLI"
echo ""
# Create the scratch dir and the run log for this invocation.
mkdir -p "${WORKING_DIR}"
touch "$SCRIPT_OUTFILE"
echo "NOTE: You can use this script to generate analytics where the analytics are not shared with Granica. "
echo "Additionally, this script can share those analytics with Granica. "
# Fix: read_yes_no takes two prompts (initial + re-ask); the original call
# passed only one, leaving the re-ask prompt empty.
read_yes_no "Would you like to enable sharing analytics with Granica?" "Would you like to enable sharing analytics with Granica?"
SHARE_ANALYTICS=false
if [[ "$read_yes_no_rvalue" == [yY] ]] ; then
  SHARE_ANALYTICS=true
fi
echo "SHARE_ANALYTICS selection: [${SHARE_ANALYTICS}]" >> "$SCRIPT_OUTFILE"
read -r -p "Enter AWS CLI profile name. This profile will be used to share data with Granica: " PROFILE
while [[ -z "$PROFILE" ]]; do
  read -r -p "Profile name cannot be empty. Enter AWS CLI profile name: " PROFILE
done
echo "Using profile [${PROFILE}]" >> "$SCRIPT_OUTFILE"
echo ""
get_region "${PROFILE}"
REGION_NAME=$get_region_rvalue
echo "The region associated with profile [${PROFILE}] is [${REGION_NAME}]"
echo "Using region [${REGION_NAME}]" >> "$SCRIPT_OUTFILE"
echo ""
# Account ID is embedded in bucket policies and the Storage Lens config.
SOURCE_ACCOUNT_ID=$(aws sts get-caller-identity --profile "$PROFILE" --query 'Account' --output text)
echo "The next steps will guide you through enabling inventory report and access logs on individual buckets"
read -r -p "Would you like to share inventory report and access logs for: [1] selected buckets or [2] all buckets: " SHARE_TYPE
while [[ "$SHARE_TYPE" != [12] ]]; do
  echo "Invalid response. Expected 1 or 2 "
  read -r -p "Would you like to share inventory report and access logs for: [1] selected buckets or [2] all buckets: " SHARE_TYPE
done
echo "User selected bucket analytics sharing mode [${SHARE_TYPE}]" >> "$SCRIPT_OUTFILE"
echo ""
if [[ "$SHARE_TYPE" == 2 ]]; then
  # --- Mode 2: enable analytics on every bucket in the account ---------------
  LISTING=$(aws s3api --profile "$PROFILE" list-buckets --query "Buckets[].Name" --output text)
  IFS=$' \t\n' read -r -a BUCKETS_ARR <<< "${LISTING}"
  BUCKET_COUNT=${#BUCKETS_ARR[@]}
  echo "There are ${BUCKET_COUNT} buckets in your account. These are their names are: ${LISTING}"
  echo ""
  ENABLE_ANALYTICS=0
  if [ "${BUCKET_COUNT}" -gt 5 ]; then
    # Many buckets: require an explicit confirmation phrase.
    EXPECTED_USER_INPUT="enable analytics on all buckets"
    echo "To enable analytics on all buckets, enter: "\""${EXPECTED_USER_INPUT}"\"" "
    echo "To *NOT* enable analytics enter anything else"
    read -r -p "> " USER_CONFIRMATION
    # Lowercase user input to compare. Fix: the character classes are now
    # quoted — unquoted [:upper:] / [:lower:] are globs and could match
    # files in the current directory.
    USER_CONFIRMATION=$(echo "$USER_CONFIRMATION" | tr '[:upper:]' '[:lower:]')
    if [[ $USER_CONFIRMATION == "${EXPECTED_USER_INPUT}" ]]; then
      ENABLE_ANALYTICS=1
    fi
  else
    # 5 or less buckets found; prompt yes/no to continue
    read_yes_no "Would you like to enable analytics on all buckets" "Would you like to enable analytics on all buckets"
    if [[ "$read_yes_no_rvalue" == [yY] ]]; then
      ENABLE_ANALYTICS=1
    fi
  fi
  if [[ "${ENABLE_ANALYTICS}" -eq 1 ]]; then
    # enable analytics on all buckets
    echo "Enabling analytics on all buckets...."
    for SOURCE_BUCKET_NAME in "${BUCKETS_ARR[@]}"
    do
      get_bucket_region "${PROFILE}" "${SOURCE_BUCKET_NAME}"
      get_analytics_bucket_name "${PROFILE}" "${get_bucket_region_rvalue}"
      ANALYTICS_BUCKET_NAME="${get_analytics_bucket_name_rvalue}"
      echo "Enabling inventory and access logs for ${SOURCE_BUCKET_NAME}"
      # deindent the cat to reduce whitespace in document
      INVENTORY_CONFIG=$(cat <<EOF
{
"Id": "report_granica",
"Destination": {
"S3BucketDestination": {
"Bucket": "arn:aws:s3:::${ANALYTICS_BUCKET_NAME}",
"Prefix": "inventory",
"Format": "CSV"
}
},
"IsEnabled": true,
"Schedule": {
"Frequency": "Daily"
},
"IncludedObjectVersions": "All",
"OptionalFields": [
"Size",
"LastModifiedDate",
"StorageClass",
"ETag",
"IsMultipartUploaded",
"ReplicationStatus",
"EncryptionStatus",
"ObjectLockRetainUntilDate",
"ObjectLockMode",
"ObjectLockLegalHoldStatus",
"IntelligentTieringAccessTier",
"BucketKeyStatus"
]
}
EOF
)
      ACCESS_LOGGING_CONFIG=$(cat <<EOF
{
"LoggingEnabled": {
"TargetBucket": "${ANALYTICS_BUCKET_NAME}",
"TargetPrefix": "access_logs/${SOURCE_BUCKET_NAME}/"
}
}
EOF
)
      # skip inventory for bucket with more than 2B objects
      get_object_count "$PROFILE" "${SOURCE_BUCKET_NAME}"
      if [[ "${get_object_count_rvalue}" -gt "2000000000" ]] ; then
        # skip inventory config
        echo "Skipping inventory report; bucket [${SOURCE_BUCKET_NAME}] has ${get_object_count_rvalue} objects"
      else
        # apply inventory config
        CMD_OUT=$(aws s3api put-bucket-inventory-configuration --profile "$PROFILE" --bucket "$SOURCE_BUCKET_NAME" --id report_granica --inventory-configuration "${INVENTORY_CONFIG}" 2>&1 )
        echo "put-bucket-inventory-configuration for bucket [${SOURCE_BUCKET_NAME}] output: [${CMD_OUT}]" >> "$SCRIPT_OUTFILE"
        echo "$CMD_OUT"
      fi
      CMD_OUT=$(aws s3api put-bucket-logging --profile "$PROFILE" --bucket "$SOURCE_BUCKET_NAME" --bucket-logging-status "${ACCESS_LOGGING_CONFIG}" 2>&1 )
      echo "put-bucket-logging for bucket [${SOURCE_BUCKET_NAME}] output: [${CMD_OUT}]" >> "$SCRIPT_OUTFILE"
      echo "$CMD_OUT"
      write_bucket_properties "${PROFILE}" "${SOURCE_BUCKET_NAME}" "${ANALYTICS_BUCKET_NAME}"
    done
  else
    # do not enable analytics
    echo "Not enabling analytics on buckets"
  fi
# 6.2. allow user to enter what buckets to enable analytics on
else
  # --- Mode 1: prompt for buckets one at a time -------------------------------
  # set value to enter loop
  read_yes_no_rvalue=y
  while [[ "$read_yes_no_rvalue" == [yY] ]]; do
    read_yes_no "Would you like to enter a bucket to analyze" "Would you like to enter a bucket to analyze"
    if [[ "$read_yes_no_rvalue" == [nN] ]]; then
      break
    fi
    read -r -p "Enter name of bucket to enable analytics on [empty to skip]: " SOURCE_BUCKET_NAME
    if [[ -z "$SOURCE_BUCKET_NAME" ]]; then
      echo "Read empty bucket name"
      continue
    fi
    # check if bucket exists
    bucket_exists "${PROFILE}" "${SOURCE_BUCKET_NAME}"
    if [ "$bucket_exists_rvalue" -eq 0 ]; then
      echo "Bucket ${SOURCE_BUCKET_NAME} not found."
      continue
    fi
    # get bucket region; used to pick/create the per-region analytics bucket
    get_bucket_region "${PROFILE}" "${SOURCE_BUCKET_NAME}"
    get_analytics_bucket_name "${PROFILE}" "${get_bucket_region_rvalue}"
    ANALYTICS_BUCKET_NAME="${get_analytics_bucket_name_rvalue}"
    # deindent the cat to reduce whitespace in document
    INVENTORY_CONFIG=$(cat <<EOF
{
"Id": "report_granica",
"Destination": {
"S3BucketDestination": {
"Bucket": "arn:aws:s3:::${ANALYTICS_BUCKET_NAME}",
"Prefix": "inventory",
"Format": "CSV"
}
},
"IsEnabled": true,
"Schedule": {
"Frequency": "Daily"
},
"IncludedObjectVersions": "All",
"OptionalFields": [
"Size",
"LastModifiedDate",
"StorageClass",
"ETag",
"IsMultipartUploaded",
"ReplicationStatus",
"EncryptionStatus",
"ObjectLockRetainUntilDate",
"ObjectLockMode",
"ObjectLockLegalHoldStatus",
"IntelligentTieringAccessTier",
"BucketKeyStatus"
]
}
EOF
)
    # Consistency fix: this path used "access-logs/" while the all-buckets
    # path used "access_logs/"; unified on "access_logs/" so all access logs
    # land under a single prefix.
    ACCESS_LOGGING_CONFIG=$(cat <<EOF
{
"LoggingEnabled": {
"TargetBucket": "${ANALYTICS_BUCKET_NAME}",
"TargetPrefix": "access_logs/${SOURCE_BUCKET_NAME}/"
}
}
EOF
)
    prompt="Would you like to enable analytics on ${SOURCE_BUCKET_NAME}"
    read_yes_no "${prompt}" "${prompt}"
    if [[ "$read_yes_no_rvalue" == [yY] ]]; then
      get_object_count "${PROFILE}" "${SOURCE_BUCKET_NAME}"
      ENABLE_INVENTORY=1
      if [[ "${get_object_count_rvalue}" -gt "2000000000" ]] ; then
        # prompt secondary confirmation for buckets with more than 2B objects
        EXPECTED_USER_INPUT="enable inventory report"
        echo "The bucket [${SOURCE_BUCKET_NAME}] has ${get_object_count_rvalue} objects"
        echo "Inventory reports cost \$2.5 per billion objects per day"
        echo "To enable inventory report for this buckets, enter: "\""${EXPECTED_USER_INPUT}"\"" "
        echo "To *NOT* enable analytics enter anything else"
        read -r -p "> " USER_CONFIRMATION
        # lowercase user input to compare (classes quoted to avoid globbing)
        USER_CONFIRMATION=$(echo "$USER_CONFIRMATION" | tr '[:upper:]' '[:lower:]')
        if [[ $USER_CONFIRMATION == "${EXPECTED_USER_INPUT}" ]]; then
          ENABLE_INVENTORY=1
        else
          ENABLE_INVENTORY=0
        fi
      fi
      # Fix: dereference the variable — the original tested the bare name
      # ENABLE_INVENTORY, relying on implicit arithmetic resolution.
      if [[ "$ENABLE_INVENTORY" -eq 0 ]] ; then
        # enable access logs only
        echo "Enabling analytics (access logs only) for ${SOURCE_BUCKET_NAME}"
      else
        # enable both inventory and access logs
        echo "Enabling analytics (inventory and access logs) for ${SOURCE_BUCKET_NAME}"
        CMD_OUT=$(aws s3api put-bucket-inventory-configuration --profile "$PROFILE" --bucket "$SOURCE_BUCKET_NAME" --id report_granica --inventory-configuration "${INVENTORY_CONFIG}" 2>&1 )
        echo "put-bucket-inventory-configuration for bucket [${SOURCE_BUCKET_NAME}] output: [${CMD_OUT}]" >> "$SCRIPT_OUTFILE"
        echo "$CMD_OUT"
        echo ""
      fi
      # enable access logs
      CMD_OUT=$(aws s3api put-bucket-logging --profile "$PROFILE" --bucket "$SOURCE_BUCKET_NAME" --bucket-logging-status "${ACCESS_LOGGING_CONFIG}" 2>&1 )
      echo "put-bucket-logging for bucket [${SOURCE_BUCKET_NAME}] output: [${CMD_OUT}]" >> "$SCRIPT_OUTFILE"
      echo "$CMD_OUT"
      echo ""
      # write bucket properties
      write_bucket_properties "${PROFILE}" "${SOURCE_BUCKET_NAME}" "${ANALYTICS_BUCKET_NAME}"
    fi
    read_yes_no "Would you like to enter a bucket to analyze" "Would you like to enter a bucket to analyze"
  done
fi
# Upload the run log collected so far. Fix: arguments are quoted — if
# ANALYTICS_BUCKET_NAME was empty (no buckets processed), the unquoted call
# silently dropped the second argument.
sync_script_output "$PROFILE" "$ANALYTICS_BUCKET_NAME"
# 9. share storage lens
echo "Would you like to share S3 Storage Lens reports? "
echo "The S3 Storage Lens report provides account level summary of all buckets. "
read_yes_no "Enable S3 Storage Lens" "Enable S3 Storage Lens"
if [[ "$read_yes_no_rvalue" == [yY] ]]; then
  echo "Enabling S3 Storage Lens."
  echo "User selected to enable S3 Storage Lens " >> "$SCRIPT_OUTFILE"
  ORGANIZATION_ARN=$(aws organizations describe-organization --profile "$PROFILE" --query "Organization.Arn" --output text)
  # Storage Lens config must be PUT in the analytics bucket's region.
  get_bucket_region "${PROFILE}" "${ANALYTICS_BUCKET_NAME}"
  STORAGE_LENS_REGION="${get_bucket_region_rvalue}"
  STORAGE_LENS_CONFIG=$(cat <<EOF
{
"Id": "storage_lens_report_granica",
"AccountLevel": {
"BucketLevel": {}
},
"DataExport": {
"S3BucketDestination": {
"Format": "CSV",
"OutputSchemaVersion": "V_1",
"AccountId": "${SOURCE_ACCOUNT_ID}",
"Arn": "arn:aws:s3:::${ANALYTICS_BUCKET_NAME}"
},
"CloudWatchMetrics": {
"IsEnabled": false
}
},
"IsEnabled": true,
"AwsOrg": {
"Arn": "${ORGANIZATION_ARN}"
}
}
EOF
)
  CMD_OUT=$(aws s3control put-storage-lens-configuration --account-id "${SOURCE_ACCOUNT_ID}" --profile "$PROFILE" --region "${STORAGE_LENS_REGION}" --config-id storage_lens_report_granica --storage-lens-configuration "${STORAGE_LENS_CONFIG}" 2>&1 )
  echo "Attempted s3control put-storage-lens-configuration [region: ${STORAGE_LENS_REGION}], config: [${STORAGE_LENS_CONFIG}]" >> "$SCRIPT_OUTFILE"
  echo "s3control put-storage-lens-configuration, output: [${CMD_OUT}]" >> "$SCRIPT_OUTFILE"
  echo "$CMD_OUT"
fi
# Final log upload, then remove the scratch directory.
sync_script_output "$PROFILE" "$ANALYTICS_BUCKET_NAME"
echo ""
# 11. cleanup
rm -rf "${WORKING_DIR}"
echo "You have configured the sharing of analytics data."
echo "Note, it can take upto 24 hours for AWS to produce analytics."
echo "Your Granica representative will contact you shortly to help visualize and analyze your reports, and suggest next steps"