truenas

This commit is contained in:
auricom
2023-01-02 06:16:48 +01:00
parent 78fb7af1b2
commit bb1e43504a
31 changed files with 749 additions and 683 deletions

View File

@@ -44,7 +44,7 @@ jobs:
echo "RENOVATE_DRY_RUN=${{ github.event.inputs.dryRun || env.RENOVATE_DRY_RUN }}" >> "${GITHUB_ENV}"
echo "LOG_LEVEL=${{ github.event.inputs.logLevel || env.LOG_LEVEL }}" >> "${GITHUB_ENV}"
- name: Renovate
uses: renovatebot/github-action@33fce9286b85fbc0be2a9a18f03db7e0acf3bfa1 # v34.77.1
uses: renovatebot/github-action@b0b629aa1d1bffb348fbe0e67ac2fad818d0f384 # v34.78.0
with:
configurationFile: "${{ env.RENOVATE_CONFIG_FILE }}"
token: "${{ steps.generate-token.outputs.token }}"

View File

@@ -10,4 +10,4 @@
name: "daily backup"
minute: "14"
hour: "4"
job: "/storage/backup.bash && curl -fsS -m 10 --retry 5 -o /dev/null https://uptime-kuma.{{ secret_cluster_domain }}api/push/peJYY3K5sH?status=up&msg=OK&ping="
job: "/storage/backup.bash"

View File

@@ -5,7 +5,6 @@
dest: "{{ scripts_dir }}/{{ item }}"
mode: 0755
loop:
- certificates_deploy.py
- snapshots_clearempty.py
- snapshots_prune.py
- telegraf_hddtemp.bash
@@ -16,10 +15,4 @@
dest: "{{ scripts_dir }}/{{ item.name }}"
mode: "{{ item.mode }}"
loop:
- { name: "backupconfig_cloudsync_pre.bash", mode: "0775" }
- { name: "certificates_deploy.bash", mode: "0775" }
- { name: "certificates_deploy.conf", mode: "0664" }
- { name: "snapshots_prune.sh", mode: "0775" }
- { name: "report_pools.sh", mode: "0775" }
- { name: "report_smart.sh", mode: "0775" }
- { name: "report_ups.sh", mode: "0775" }

View File

@@ -1,17 +0,0 @@
#!/bin/bash
# DEBUG
# set -x
# Configuration backup Cloud Sync pre-script
# Variables
DATE=$(date +%Y%m%d)
BACKUP_FOLDER="{{ backups_dir }}servers/{{ ansible_facts['nodename'] }}"
cp -p /data/freenas-v1.db ${BACKUP_FOLDER}/${DATE}.db
chmod -R 775 ${BACKUP_FOLDER}/${DATE}.db
chown -R homelab:homelab ${BACKUP_FOLDER}/${DATE}.db
# Keep the last 90 backups on disk
find ${BACKUP_FOLDER}/*.db -mtime +90 -type f -delete

View File

@@ -1,37 +0,0 @@
#!/bin/bash
# DEBUG
# set -x
# Variables
TARGET=$(hostname)
DAYS=21
SCRIPT_PATH="{{ scripts_dir }}"
CERTIFICATE_PATH="{{ certificates_dir }}"
CONFIG_FILE="${SCRIPT_PATH}/certificates_deploy.conf"
UPTIME_KUMA_ID="{{uptime_kuma_id_truenas_cert}}"
# Check if cert is older than 69 days
result=$(find ${CERTIFICATE_PATH}/cert.pem -mtime +69)
if [[ "$result" == "${CERTIFICATE_PATH}/cert.pem" ]]; then
echo "ERROR - Certificate is older than 69 days"
echo "ERROR - Verify than it has been renewed by ACME client on opnsense and that the upload automation has been executed"
curl -s \
--form-string "token={{secret_pushover_api_key}}" \
--form-string "user={{secret_pushover_user_key}}" \
--form-string "message=Certificate on $TARGET is older than 69 days. Verify than it has been renewed by ACME client on opnsense and that the upload automation has been executed" \
https://api.pushover.net/1/messages.json
else
echo "checking if $TARGET expires in less than $DAYS days"
openssl x509 -checkend $(( 24*3600*$DAYS )) -noout -in <(openssl s_client -showcerts -connect $TARGET:443 </dev/null 2>/dev/null | openssl x509 -outform PEM)
if [ $? -ne 0 ]; then
echo "INFO - Certificate expires in less than $DAYS days"
echo "INFO - Deploying new certificate"
# Deploy certificate (truenas UI & minio)
python ${SCRIPT_PATH}/certificates_deploy.py -c ${CONFIG_FILE}
test $? -eq 0 && curl https://uptime-kuma.{{secret_cluster_domain}}/api/push/${UPTIME_KUMA_ID}?status=up&msg=OK&ping=
else
echo "INFO - Certificate expires in more than $DAYS"
fi
fi

View File

@@ -1,48 +0,0 @@
# Configuration file for deploy_certificates.py
[deploy]
# Choose one of the following authentication methods, "api_key" or "password" (comment out the other one).
# Auth via API keys is highly recommended, but is only available from TrueNAS (Core) 12.0 up.
# You can generate a new API key in the web interface under "Settings" (upper right) > "API Keys".
api_key = {{ root_api_key }}
# If you are on FreeNAS 11 or lower, set this to your FreeNAS root password
# password =
# Everything below here is optional
# cert_fqdn specifies the FQDN used for your certificate. Default is your system hostname
# cert_fqdn = foo.bar.baz
# connect_host specifies the hostname the script should attempt to connect to, to deploy the cert.
# Default is localhost (assuming the script is running on your FreeNAS box)
# connect_host = baz.bar.foo
# verify sets whether the script will attempt to verify the server's certificate with an HTTPS
# connection. Set to true if you're using an HTTPS connection to a remote host. If connect_host
# is set to localhost (or is unset), set to false. Default is false.
# verify = false
# privkey_path is the path to the certificate private key on your system. Default
# assumes you're using acme.sh:
# /root/.acme.sh/cert_fqdn/cert_fqdn.key
privkey_path = {{ certificates_dir }}/key.pem
# fullchain_path is the path to the full chain (leaf cert + intermediate certs)
# on your system. Default assumes you're using acme.sh:
# /root/.acme.sh/cert_fqdn/fullchain.cer
fullchain_path = {{ certificates_dir }}/fullchain.pem
# protocol sets the connection protocol, http or https. Include '://' at the end.
# Default is http
# protocol = https://
# port sets the port to use to connect. Default is 80. If protocol is https,
# this MUST be set to your https port.
# port = 443
# set ftp_enabled to true if you have the FTP service enabled on your FreeNAS. Default is false.
# ftp_enabled = true
{% if service_s3 is defined %}
s3_enabled = true
{% endif %}

View File

@@ -1,162 +0,0 @@
#!/bin/sh
### Parameters ###
# Specify your email address here:
email="truenas@{{ secret_email_domain }}"
# zpool output changed from FreeNAS version 11.0 to 11.1, breaking
# our parsing of the scrubErrors and scrubDate variables. Added a
# conditional to test for the FreeNAS version and parse accordingly.
# This changed again with the release of TrueNAS. Ironically, back to
# the way parsing worked with older versions of FreeNAS.
#
# We obtain the FreeBSD version using uname, as suggested by user
# Chris Moore on the FreeBSD forum.
#
# 'uname -K' gives 7-digit OS release and version, e.g.:
#
# FreeBSD 11.0 1100512
# FreeBSD 11.1 1101505
# FreeBSD 12.2 1202000
fbsd_relver=$(uname -K)
freenashost=$(hostname -s | tr '[:lower:]' '[:upper:]')
boundary="===== MIME boundary; FreeNAS server ${freenashost} ====="
logfile="/tmp/zpool_report.tmp"
subject="ZPool Status Report for ${freenashost}"
pools=$(zpool list -H -o name)
usedWarn=75
usedCrit=90
scrubAgeWarn=30
warnSymbol="?"
critSymbol="!"
### Set email headers ###
printf "%s\n" "To: ${email}
Subject: ${subject}
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=\"$boundary\"
--${boundary}
Content-Type: text/html; charset=\"US-ASCII\"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
<html><head></head><body><pre style=\"font-size:14px; white-space:pre\">" >> ${logfile}
###### summary ######
(
echo "########## ZPool status report summary for all pools on server ${freenashost} ##########"
echo ""
echo "+--------------+--------+------+------+------+----+----+--------+------+-----+"
echo "|Pool Name |Status |Read |Write |Cksum |Used|Frag|Scrub |Scrub |Last |"
echo "| | |Errors|Errors|Errors| | |Repaired|Errors|Scrub|"
echo "| | | | | | | |Bytes | |Age |"
echo "+--------------+--------+------+------+------+----+----+--------+------+-----+"
) >> ${logfile}
for pool in $pools; do
if [ "$fbsd_relver" -ge 1101000 ]; then
frag="$(zpool list -H -o frag "$pool")"
else
if [ "${pool}" = "freenas-boot" ] || [ "${pool}" = "boot-pool" ]; then
frag=""
else
frag="$(zpool list -H -o frag "$pool")"
fi
fi
status="$(zpool list -H -o health "$pool")"
errors="$(zpool status "$pool" | grep -E "(ONLINE|DEGRADED|FAULTED|UNAVAIL|REMOVED)[ \t]+[0-9]+")"
readErrors=0
for err in $(echo "$errors" | awk '{print $3}'); do
if echo "$err" | grep -E -q "[^0-9]+"; then
readErrors=1000
break
fi
readErrors=$((readErrors + err))
done
writeErrors=0
for err in $(echo "$errors" | awk '{print $4}'); do
if echo "$err" | grep -E -q "[^0-9]+"; then
writeErrors=1000
break
fi
writeErrors=$((writeErrors + err))
done
cksumErrors=0
for err in $(echo "$errors" | awk '{print $5}'); do
if echo "$err" | grep -E -q "[^0-9]+"; then
cksumErrors=1000
break
fi
cksumErrors=$((cksumErrors + err))
done
if [ "$readErrors" -gt 999 ]; then readErrors=">1K"; fi
if [ "$writeErrors" -gt 999 ]; then writeErrors=">1K"; fi
if [ "$cksumErrors" -gt 999 ]; then cksumErrors=">1K"; fi
used="$(zpool list -H -p -o capacity "$pool")"
scrubRepBytes="N/A"
scrubErrors="N/A"
scrubAge="N/A"
if [ "$(zpool status "$pool" | grep "scan" | awk '{print $2}')" = "scrub" ]; then
scrubRepBytes="$(zpool status "$pool" | grep "scan" | awk '{print $4}')"
if [ "$fbsd_relver" -gt 1101000 ] && [ "$fbsd_relver" -lt 1200000 ]; then
scrubErrors="$(zpool status "$pool" | grep "scan" | awk '{print $10}')"
scrubDate="$(zpool status "$pool" | grep "scan" | awk '{print $17"-"$14"-"$15"_"$16}')"
else
scrubErrors="$(zpool status "$pool" | grep "scan" | awk '{print $8}')"
scrubDate="$(zpool status "$pool" | grep "scan" | awk '{print $15"-"$12"-"$13"_"$14}')"
fi
scrubTS="$(date -j -f "%Y-%b-%e_%H:%M:%S" "$scrubDate" "+%s")"
currentTS="$(date "+%s")"
scrubAge=$((((currentTS - scrubTS) + 43200) / 86400))
fi
if [ "$status" = "FAULTED" ] || [ "$used" -gt "$usedCrit" ]; then
symbol="$critSymbol"
elif [ "$scrubErrors" != "N/A" ] && [ "$scrubErrors" != "0" ]; then
symbol="$critSymbol"
elif [ "$status" != "ONLINE" ] \
|| [ "$readErrors" != "0" ] \
|| [ "$writeErrors" != "0" ] \
|| [ "$cksumErrors" != "0" ] \
|| [ "$used" -gt "$usedWarn" ] \
|| [ "$(echo "$scrubAge" | awk '{print int($1)}')" -gt "$scrubAgeWarn" ]; then
symbol="$warnSymbol"
elif [ "$scrubRepBytes" != "0" ] && [ "$scrubRepBytes" != "0B" ] && [ "$scrubRepBytes" != "N/A" ]; then
symbol="$warnSymbol"
else
symbol=" "
fi
(
printf "|%-12s %1s|%-8s|%6s|%6s|%6s|%3s%%|%4s|%8s|%6s|%5s|\n" \
"$pool" "$symbol" "$status" "$readErrors" "$writeErrors" "$cksumErrors" \
"$used" "$frag" "$scrubRepBytes" "$scrubErrors" "$scrubAge"
) >> ${logfile}
done
(
echo "+--------------+--------+------+------+------+----+----+--------+------+-----+"
) >> ${logfile}
###### for each pool ######
for pool in $pools; do
(
echo ""
echo "########## ZPool status report for ${pool} ##########"
echo ""
zpool status -v "$pool"
) >> ${logfile}
done
printf "%s\n" "</pre></body></html>
--${boundary}--" >> ${logfile}
### Send report ###
if [ -z "${email}" ]; then
echo "No email address specified, information available in ${logfile}"
else
sendmail -t -oi < ${logfile}
rm ${logfile}
fi

View File

@@ -1,267 +0,0 @@
#!/bin/sh
### Parameters ###
# Specify your email address here:
email="truenas@{{ secret_email_domain }}"
# Full path to 'smartctl' program:
smartctl=/usr/local/sbin/smartctl
freenashost=$(hostname -s | tr '[:lower:]' '[:upper:]')
boundary="===== MIME boundary; FreeNAS server ${freenashost} ====="
logfile="smart_report.tmp"
subject="SMART Status Report for ${freenashost}"
tempWarn=40
tempCrit=45
sectorsCrit=10
testAgeWarn=1
warnSymbol="?"
critSymbol="!"
Drive_count=0
SATA_count=0
SAS_count=0
Drive_list=""
SATA_list=""
SAS_list=""
# Get list of SMART-enabled drives
get_smart_drives()
{
gs_drives=$("$smartctl" --scan | awk '{print $1}')
for gs_drive in $gs_drives; do
gs_smart_flag=$("$smartctl" -i "$gs_drive" | grep -E "SMART support is:[[:blank:]]+Enabled" | awk '{print $4}')
if [ "$gs_smart_flag" = "Enabled" ]; then
Drive_list="$Drive_list $gs_drive"
Drive_count=$((Drive_count + 1))
fi
done
}
# Get list of SATA disks, including older drives that only report an ATA version
get_sata_drives()
{
for drive in $Drive_list; do
lFound=0
gsata_smart_flag=$("$smartctl" -i "$drive" | grep -E "SATA Version is:[[:blank:]]" | awk '{print $4}')
if [ "$gsata_smart_flag" = "SATA" ]; then
lFound=$((lFound + 1))
else
gsata_smart_flag=$("$smartctl" -i "$drive" | grep -E "ATA Version is:[[:blank:]]" | awk '{print $1}')
if [ "$gsata_smart_flag" = "ATA" ]; then
lFound=$((lFound + 1))
fi
fi
if [ $lFound -gt 0 ]; then
SATA_list="$SATA_list $drive"
SATA_count=$((SATA_count + 1))
fi
done
}
# Get list of SAS disks
get_sas_drives()
{
for drive in $Drive_list; do
gsas_smart_flag=$("$smartctl" -i "$drive" | grep -E "Transport protocol:[[:blank:]]+SAS" | awk '{print $3}')
if [ "$gsas_smart_flag" = "SAS" ]; then
SAS_list="$SAS_list $drive"
SAS_count=$((SAS_count + 1))
fi
done
}
### Fetch drive lists ###
get_smart_drives
get_sata_drives
get_sas_drives
### Set email headers ###
printf "%s\n" "To: ${email}
Subject: ${subject}
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=\"$boundary\"
--${boundary}
Content-Type: text/html; charset=\"US-ASCII\"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
<html><head></head><body><pre style=\"font-size:14px; white-space:pre\">" > ${logfile}
if [ $Drive_count -eq 0 ]; then
echo "##### No SMART-enabled disks found on this system #####" >> "$logfile"
fi
###### Summary for SATA drives ######
if [ $SATA_count -gt 0 ]; then
(
echo "########## SMART status report summary for all SATA drives on server ${freenashost} ##########"
echo ""
echo "+------+------------------------+----+------+-----+-----+-------+-------+--------+------+----------+------+-----------+----+"
echo "|Device|Serial |Temp| Power|Start|Spin |ReAlloc|Current|Offline |Seek |Total |High | Command|Last|"
echo "| |Number | | On |Stop |Retry|Sectors|Pending|Uncorrec|Errors|Seeks |Fly | Timeout|Test|"
echo "| | | | Hours|Count|Count| |Sectors|Sectors | | |Writes| Count |Age |"
echo "+------+------------------------+----+------+-----+-----+-------+-------+--------+------+----------+------+-----------+----+"
) >> "$logfile"
###### Detail information for each SATA drive ######
for drive in $SATA_list; do
(
devid=$(basename "$drive")
lastTestHours=$("$smartctl" -l selftest "$drive" | grep "# 1" | awk '{print $9}')
"$smartctl" -A -i -v 7,hex48 "$drive" | \
awk -v device="$devid" -v tempWarn="$tempWarn" -v tempCrit="$tempCrit" -v sectorsCrit="$sectorsCrit" \
-v testAgeWarn="$testAgeWarn" -v warnSymbol="$warnSymbol" -v critSymbol="$critSymbol" \
-v lastTestHours="$lastTestHours" '
/Serial Number:/{serial=$3}
/190 Airflow_Temperature/{temp=$10}
/194 Temperature/{temp=$10}
/Power_On_Hours/{split($10,a,"+");sub(/h/,"",a[1]);onHours=a[1];}
/Start_Stop_Count/{startStop=$10}
/Spin_Retry_Count/{spinRetry=$10}
/Reallocated_Sector/{reAlloc=$10}
/Current_Pending_Sector/{pending=$10}
/Offline_Uncorrectable/{offlineUnc=$10}
/Seek_Error_Rate/{seekErrors=("0x" substr($10,3,4));totalSeeks=("0x" substr($10,7))}
/High_Fly_Writes/{hiFlyWr=$10}
/Command_Timeout/{cmdTimeout=$10}
END {
testAge=sprintf("%.0f", (onHours - lastTestHours) / 24);
if (temp > tempCrit || reAlloc > sectorsCrit || pending > sectorsCrit || offlineUnc > sectorsCrit)
device=device " " critSymbol;
else if (temp > tempWarn || reAlloc > 0 || pending > 0 || offlineUnc > 0 || testAge > testAgeWarn)
device=device " " warnSymbol;
seekErrors=sprintf("%d", seekErrors);
totalSeeks=sprintf("%d", totalSeeks);
if (totalSeeks == "0") {
seekErrors="N/A";
totalSeeks="N/A";
}
if (temp > tempWarn || temp > tempCrit) temp=temp"*"
if (reAlloc > 0 || reAlloc > sectorsCrit) reAlloc=reAlloc"*"
if (pending > 0 || pending > sectorsCrit) pending=pending"*"
if (offlineUnc > 0 || offlineUnc > sectorsCrit) offlineUnc=offlineUnc"*"
if (testAge > testAgeWarn) testAge=testAge"*"
if (hiFlyWr == "") hiFlyWr="N/A";
if (cmdTimeout == "") cmdTimeout="N/A";
printf "|%-6s|%-24s|%-4s|%6s|%5s|%5s|%7s|%7s|%8s|%6s|%10s|%6s|%11s|%4s|\n",
device, serial, temp, onHours, startStop, spinRetry, reAlloc, pending, offlineUnc,
seekErrors, totalSeeks, hiFlyWr, cmdTimeout, testAge;
}'
) >> "$logfile"
done
(
echo "+------+------------------------+----+------+-----+-----+-------+-------+--------+------+----------+------+-----------+----+"
) >> "$logfile"
fi
###### Summary for SAS drives ######
if [ $SAS_count -gt 0 ]; then
(
if [ $SATA_count -gt 0 ]; then
echo ""
fi
echo "########## SMART status report summary for all SAS drives on server ${freenashost} ##########"
echo ""
echo "+------+------------------------+----+-----+------+------+------+------+------+------+"
echo "|Device|Serial |Temp|Start|Load |Defect|Uncorr|Uncorr|Uncorr|Non |"
echo "| |Number | |Stop |Unload|List |Read |Write |Verify|Medium|"
echo "| | | |Count|Count |Elems |Errors|Errors|Errors|Errors|"
echo "+------+------------------------+----+-----+------+------+------+------+------+------+"
) >> "$logfile"
###### Detail information for each SAS drive ######
for drive in $SAS_list; do
(
devid=$(basename "$drive")
"$smartctl" -a "$drive" | \
awk -v device="$devid" -v tempWarn="$tempWarn" -v tempCrit="$tempCrit" \
-v warnSymbol="$warnSymbol" -v critSymbol="$critSymbol" '\
/Serial number:/{serial=$3}
/Current Drive Temperature:/{temp=$4} \
/start-stop cycles:/{startStop=$4} \
/load-unload cycles:/{loadUnload=$4} \
/grown defect list:/{defectList=$6} \
/read:/{readErrors=$8} \
/write:/{writeErrors=$8} \
/verify:/{verifyErrors=$8} \
/Non-medium error count:/{nonMediumErrors=$4} \
END {
if (temp > tempCrit)
device=device " " critSymbol;
else if (temp > tempWarn)
device=device " " warnSymbol;
printf "|%-6s|%-24s| %3s|%5s|%6s|%6s|%6s|%6s|%6s|%6s|\n",
device, serial, temp, startStop, loadUnload, defectList, \
readErrors, writeErrors, verifyErrors, nonMediumErrors;
}'
) >> "$logfile"
done
(
echo "+------+------------------------+----+-----+------+------+------+------+------+------+"
) >> "$logfile"
fi
if [ $SATA_count -gt 0 ] || [ $SAS_count -gt 0 ]; then
###### Emit SATA drive information ######
for drive in $SATA_list; do
vendor=$("$smartctl" -i "$drive" | grep "Vendor:" | awk '{print $NF}')
if [ -z "$vendor" ]; then
dfamily=$("$smartctl" -i "$drive" | grep "Model Family" | awk '{print $3, $4, $5, $6, $7}' | sed -e 's/[[:space:]]*$//')
dmodel=$("$smartctl" -i "$drive" | grep "Device Model" | awk '{print $3, $4, $5, $6, $7}' | sed -e 's/[[:space:]]*$//')
if [ -z "$dfamily" ]; then
dinfo=$dmodel
else
dinfo="$dfamily ($dmodel)"
fi
else
product=$("$smartctl" -i "$drive" | grep "Product:" | awk '{print $NF}')
revision=$("$smartctl" -i "$drive" | grep "Revision:" | awk '{print $NF}')
dinfo="$vendor $product $revision"
fi
serial=$("$smartctl" -i "$drive" | grep "Serial Number" | awk '{print $3}')
(
echo ""
echo "########## SATA drive $drive Serial: $serial"
echo "########## ${dinfo}"
"$smartctl" -n never -H -A -l error "$drive"
"$smartctl" -n never -l selftest "$drive" | grep "# 1 \\|Num" | cut -c6-
) >> "$logfile"
done
###### Emit SAS drive information ######
for drive in $SAS_list; do
devid=$(basename "$drive")
brand=$("$smartctl" -i "$drive" | grep "Product" | sed "s/^.* //")
serial=$("$smartctl" -i "$drive" | grep "Serial number" | sed "s/^.* //")
(
echo ""
echo "########## SMART status for SAS drive $drive $serial (${brand}) ##########"
"$smartctl" -n never -H -A -l error "$drive"
"$smartctl" -n never -l selftest "$drive" | grep "# 1 \\|Num" | cut -c6-
) >> "$logfile"
done
fi
sed -i '' -e '/smartctl 7.*/d' "$logfile"
sed -i '' -e '/smartctl 6.*/d' "$logfile"
sed -i '' -e '/smartctl 5.*/d' "$logfile"
sed -i '' -e '/smartctl 4.*/d' "$logfile"
sed -i '' -e '/Copyright/d' "$logfile"
sed -i '' -e '/=== START OF READ/d' "$logfile"
sed -i '' -e '/SMART Attributes Data/d' "$logfile"
sed -i '' -e '/Vendor Specific SMART/d' "$logfile"
sed -i '' -e '/SMART Error Log Version/d' "$logfile"
printf "%s\n" "</pre></body></html>
--${boundary}--" >> ${logfile}
### Send report ###
if [ -z "${email}" ]; then
echo "No email address specified, information available in ${logfile}"
else
sendmail -t -oi < "$logfile"
rm "$logfile"
fi

View File

@@ -1,92 +0,0 @@
#!/bin/sh
# Send UPS report to designated email address
# Reference: http://networkupstools.org/docs/developer-guide.chunked/apas01.html
### Parameters ###
# Specify your email address here:
email="truenas@{{ secret_email_domain }}"
# Set to a value greater than zero to include all available UPSC
# variables in the report:
senddetail=0
freenashost=$(hostname -s)
freenashostuc=$(hostname -s | tr '[:lower:]' '[:upper:]')
boundary="===== MIME boundary; FreeNAS server ${freenashost} ====="
logfile="/tmp/ups_report.tmp"
subject="UPS Status Report for ${freenashostuc}"
### Set email headers ###
printf "%s\n" "To: ${email}
Subject: ${subject}
Mime-Version: 1.0
Content-Type: multipart/mixed; boundary=\"$boundary\"
--${boundary}
Content-Type: text/html; charset=\"US-ASCII\"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
<html><head></head><body><pre style=\"font-size:14px; white-space:pre\">" >> ${logfile}
# Get a list of all ups devices installed on the system:
upslist=$(upsc -l "${freenashost}")
### Set email body ###
(
date "+Time: %Y-%m-%d %H:%M:%S"
echo ""
for ups in $upslist; do
ups_type=$(upsc "${ups}" device.type 2> /dev/null | tr '[:lower:]' '[:upper:]')
ups_mfr=$(upsc "${ups}" ups.mfr 2> /dev/null)
ups_model=$(upsc "${ups}" ups.model 2> /dev/null)
ups_serial=$(upsc "${ups}" ups.serial 2> /dev/null)
ups_status=$(upsc "${ups}" ups.status 2> /dev/null)
ups_load=$(upsc "${ups}" ups.load 2> /dev/null)
ups_realpower=$(upsc "${ups}" ups.realpower 2> /dev/null)
ups_realpowernominal=$(upsc "${ups}" ups.realpower.nominal 2> /dev/null)
ups_batterycharge=$(upsc "${ups}" battery.charge 2> /dev/null)
ups_batteryruntime=$(upsc "${ups}" battery.runtime 2> /dev/null)
ups_batteryvoltage=$(upsc "${ups}" battery.voltage 2> /dev/null)
ups_inputvoltage=$(upsc "${ups}" input.voltage 2> /dev/null)
ups_outputvoltage=$(upsc "${ups}" output.voltage 2> /dev/null)
printf "=== %s %s, model %s, serial number %s\n\n" "${ups_mfr}" "${ups_type}" "${ups_model}" "${ups_serial} ==="
echo "Name: ${ups}"
echo "Status: ${ups_status}"
echo "Output Load: ${ups_load}%"
if [ ! -z "${ups_realpower}" ]; then
echo "Real Power: ${ups_realpower}W"
fi
if [ ! -z "${ups_realpowernominal}" ]; then
echo "Real Power: ${ups_realpowernominal}W (nominal)"
fi
if [ ! -z "${ups_inputvoltage}" ]; then
echo "Input Voltage: ${ups_inputvoltage}V"
fi
if [ ! -z "${ups_outputvoltage}" ]; then
echo "Output Voltage: ${ups_outputvoltage}V"
fi
echo "Battery Runtime: ${ups_batteryruntime}s"
echo "Battery Charge: ${ups_batterycharge}%"
echo "Battery Voltage: ${ups_batteryvoltage}V"
echo ""
if [ $senddetail -gt 0 ]; then
echo "=== ALL AVAILABLE UPS VARIABLES ==="
upsc "${ups}"
echo ""
fi
done
) >> ${logfile}
printf "%s\n" "</pre></body></html>
--${boundary}--" >> ${logfile}
### Send report ###
if [ -z "${email}" ]; then
echo "No email address specified, information available in ${logfile}"
else
sendmail -t -oi < ${logfile}
rm ${logfile}
fi

View File

@@ -41,8 +41,6 @@ spec:
value: "3"
- name: HEALTHCHECK_PORT
value: "8080"
- name: WEBHOOK_URL
value: https://uptime-kuma.${SECRET_CLUSTER_DOMAIN}/api/push/45cHKtahUg?status=up&msg=OK&ping=
command:
- "/backup.sh"
volumeMounts:

View File

@@ -5,9 +5,3 @@ kind: Kustomization
namespace: default
resources:
- ./external-backup.yaml
configMapGenerator:
- name: postgres-external-backup
files:
- ./00-webhook
generatorOptions:
disableNameSuffixHash: true

View File

@@ -75,7 +75,6 @@ spec:
done
done
echo "INFO: Backup done"
curl -m 10 --retry 5 https://uptime-kuma.${SECRET_CLUSTER_DOMAIN}/api/push/Xk21W4T5mC?status=up&msg=OK&ping=
EOF
volumeMounts:
- name: secret

View File

@@ -51,6 +51,7 @@ resources:
- ./smtp-relay/ks.yaml
- ./tandoor/ks.yaml
- ./theme-park/ks.yaml
- ./truenas/ks.yaml
- ./unifi/ks.yaml
- ./uptime-kuma/ks.yaml
- ./vaultwarden/ks.yaml

View File

@@ -21,7 +21,7 @@ spec:
initContainers:
containers:
- name: transcode-incremental
image: ghcr.io/auricom/freac:1.1.6@sha256:17a3c42e7e678bc68ea154e3750cdca2bc702a6d03dc8918ef6ce86660633842
image: ghcr.io/auricom/freac:1.1.6@sha256:8cdd3f7020a16f11000292f2856e28628921f80a8697048802617f59bba5c93a
imagePullPolicy: IfNotPresent
env:
- name: TRANSCODE_INPUT_DIR

View File

@@ -29,5 +29,3 @@ curl -fsSL \
-H "Content-Type: text/xml" \
-H "Authorization: AWS ${AWS_ACCESS_KEY_ID}:${http_signature}" \
"${S3_URL}/${http_filepath}"
test $? -eq 0 && curl -m 10 --retry 5 https://uptime-kuma.${SECRET_CLUSTER_DOMAIN}/api/push/14g69yYBhu?status=up&msg=OK&ping=

View File

@@ -33,8 +33,6 @@ spec:
if [[ $QBITTORRENT_POD == *"qbittorrent"* ]]; then
kubectl cp /tmp/ipfilter.dat default/$QBITTORRENT_POD:/config/ipfilter.dat
kubectl rollout restart deployment qbittorrent --namespace default
curl http://uptime-kuma.default.svc.cluster.local.:3001/api/push/6RUDha9bDp?status=up&msg=OK&ping=
sleep 5
else
echo "qbittorrent deployment not found"
exit 1

View File

@@ -0,0 +1,90 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/helmrelease_v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: truenas-backup
namespace: default
spec:
interval: 15m
chart:
spec:
chart: app-template
version: 1.2.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 3
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
controller:
type: cronjob
cronjob:
concurrencyPolicy: Forbid
schedule: "@daily"
restartPolicy: OnFailure
image:
repository: ghcr.io/auricom/kubectl
tag: 1.26.0@sha256:f512e3008d0492cbae7aac6eaccc21b13d723374715aaedd59d352d840f0229c
command: ["/bin/bash", "/app/truenas-backup.sh"]
env:
HOSTNAME: truenas
SECRET_DOMAIN: ${SECRET_DOMAIN}
SECRET_CLUSTER_DOMAIN: ${SECRET_CLUSTER_DOMAIN}
envFrom:
- secretRef:
name: truenas-backup-secret
service:
main:
enabled: false
persistence:
config:
enabled: true
type: configMap
name: truenas-backup-configmap
subPath: truenas-backup.sh
mountPath: /app/truenas-backup.sh
defaultMode: 0775
readOnly: true
ssh:
enabled: true
type: secret
name: truenas-backup-secret
subPath: SSH_KEY
mountPath: /opt/id_rsa
defaultMode: 0775
readOnly: true
additionalContainers:
truenas-remote-backup:
name: truenas-remote-backup
image: ghcr.io/auricom/kubectl:1.26.0@sha256:f512e3008d0492cbae7aac6eaccc21b13d723374715aaedd59d352d840f0229c
command: ["/bin/bash", "/app/truenas-backup.sh"]
env:
- name: HOSTNAME
value: truenas-remote
- name: SECRET_DOMAIN
value: ${SECRET_DOMAIN}
- name: SECRET_CLUSTER_DOMAIN
value: ${SECRET_CLUSTER_DOMAIN}
envFrom:
- secretRef:
name: truenas-backup-secret
volumeMounts:
- name: config
readOnly: true
mountPath: /app/truenas-backup.sh
subPath: truenas-backup.sh
- name: ssh
readOnly: true
mountPath: /opt/id_rsa
subPath: SSH_KEY

View File

@@ -0,0 +1,16 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./helmrelease.yaml
- ./secret.sops.yaml
configMapGenerator:
- name: truenas-backup-configmap
files:
- ./truenas-backup.sh
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled

View File

@@ -0,0 +1,30 @@
apiVersion: v1
kind: Secret
metadata:
name: truenas-backup-secret
namespace: default
type: Opaque
stringData:
AWS_ACCESS_KEY_ID: ENC[AES256_GCM,data:4Waq8U9rY/IsdzKInsJQGoXD1Q4=,iv:N05MKTKyY4LatzfPZS6Vke1dyZmYs0tOhU/O51K8mwQ=,tag:bQHdjgc5Xqg//PBOVuUccg==,type:str]
AWS_SECRET_ACCESS_KEY: ENC[AES256_GCM,data:JN6f87JOBaZVC5ue4aArSDrQ/NVe73vZZgmbXYeGAVcl4urzUbO4qA==,iv:i0RP/gidkJG7pccRVIT6FUd3IHm7Z5y2hnjSBqVwHLA=,tag:L688v2TfeIMnX7BNmA5kmA==,type:str]
SSH_KEY: ENC[AES256_GCM,data:bGIefWSzJHPFL98GrIjMz1ojanyrwf2qOVkfFHscBO3GAgjdBlkXk7JKWc5ykpPdMGbNd7G6XIjxw4fx2nj87RqiavRPRKwJu3VouzzBee75aGDqKrWRmZUhwOwliv/RS8Ipma8qTMDBNTfpHcAFHOW7kYYAe1gCYlWKum8KDjJ3G4YW0eApjSczyWsQ5ApzbiqAbOswCRvwa4dBzFw8rWVjUVpbqCA9iiql8x4agApY/tO2lrSsnnpv5URcoOD+qiXDOb+bkMDg/CKxyLCvcoQF1MtodjOazTgo1yFefGqD5Y4q3KFS70wLKkU6NK1WbfWz/s0hGt2M6RnUvxWKXmeX4AeSN/716/Kju9kZ07ddGq5A2okrMxqVOiVlOc/lMJICe3pANdfLE1/TNLQwSf5L5R18ulJ3Wj/wiB8FkFvqHeA28Hqtb+L2kiDa8nGbxr6qeOrOr8EuRdwogCKy56cU4w8Tjo6UVwfxyWFD+MuXlzYjxg90O5CRV44KfTV+tya2JyCIDY0K9/vmLYEDFtIWpIYLLEwBj6ZvomZbIFMJYK0BYo+G5E6xEi4IZzb00DIy6p/UUkDs1dLhMTdPigKMXrt0chX30Txw245rCQ5V/tzNBAvpsmglIsOyn7KihE+HUGJBTGfLgBDXkgplvz3wckq+axGWRDv8Z5/HN76xkucVE1ikTN5qRKY/xqi99fdmAXc6EhGbfQX4WvSyJFvtI1QUFy1Acdl55tQG9QsrFJ7Xd3ruhIkYNx6+IqvgvOZMOWRt5rvQ0b8VBZCMquNxETTRXNEg1ltCbgQ+mXKlX34gFBJWBzsfVdfosOSh9RKEBgrqSZ+wdH2pkjV4Ka8KO+NcDhap2VmDQWcA34LkX1825c53HjQScSfkgLR2Lty9cEwIqA78eI6gwx2Zw+TmFc6Jj1vktiNOudrzlwQphhh8ggNl9MagJbi8maiRIyb4xGsIIJdcp6kfHV/FBoYrb1SrDzmLSG/c01rrEr1/oL35EFncF+hn8MdBOXNcJSxdKVdLoF3/rjL/Lgz/U1gxgbCJekc5UwjlEsDmM1Mlwd0dXWJ94+djOXWRLyu/FhlJrMtA5YdCecm/x9d3wLS61zkYH5HU8HTKhkZgreRvcGEmT2SEgesA4MVLkY4cFeGaYNgSX2oneacl9eHyv0CBfXE/nTxDFZA/tzSSVs94TDe6o3tBcmANB9C4AcmBp+lVYfUdZf9ynRK6E4NAsJklpIWVLYxyWDNy83X/yctGQEXBVVwLK2Eq7Y/jbXKlWR9vBf4ZHaCDUO+eEtCWz/JoQrxv2zu0g42HSrwn9yD6WAM1CMOhGhR+5X6VfYZ6mIInOHB8cO2UXkRkzJRqkvK4FhEOswtVHlCWRUCEtxqYFCjuYiSun+EvdzCeP84MHjI64lPB1QhS0KXR+A9MgQiWFbvGTCasORvrlRRs+J30W1UI5UbhyNh07LIdH68EQmdq8TGCr6n7v9mEOIAbrrLLv6Wi/pJB5irsVQxO2p63xtj+lb33wYj+/2HLhq2z/1ElXbFbqkibWSz+Hp2ZYswlcthmSyCa7YSwacVVFYGoUyepvom9PcizeDcD+QEx68rXnlKo+ilWNFpJUybRVtoEkpKLAyJ7VA8SUmhVhAPCEz5IMvXPsCd5KkA0u2XGOc/McBjcOMGgCH8NvRAer3HBHjLq3aex41J4hQAAmmZUMAnomDj2V9kZsxzD5FnsH7+f026GUtVaN393dcS1zxCN8PI57dEVIBkUn+HcgbJ8rq+8A5kPBKAO+CX8HHDZ0zRoero7PT7nXxKhPWQ7VxNm/FYdhGP+N4FbbQkBwmK2nUT2MWwW/nFgHX95rCO+qA6uq9uW0zhW7ML4NHbIXXFcxHfa28A/CUSValk60cFsjf/TYk2n5aETVby9sLnYkvPDOJle52JVvtWrpnvpka0sh1/Ve3L92upX19pXv/0925F+4EdrCe1OPScAibMhNt90EiNAuyaWkAtycmdvlazfEUEcrkYA/KqQx+KXXyQcYDxvd7rbRScTv4GorlKLOc90kOd6eRgC+GW0g//XZCkHEdHL2WbY8mDuMSC+5O6ig8pqxwzT3efIlaS0asJ27Hnz0w9pzrktAJyz523SaLIujJKsQX7o8yQ89LDYeqtTZCGA1jOQybQGsGhGuOQOCkEzwRVeqpY/MbNwkAL2TOcbxpEDaVSMPw64ECTLYITJGd2/2WAUkdEjAegURamjvuYDliXQwuyOF4LriM+A0AdJm/BgbTR0sS8f6TKAqiDmEEWrG885V07i+TvxKT4K60bjqX/FzO10u+3LaV8tAvGbNR0VP/euxnt+mmNIQ2qEYdMiLGQP4Dc8wPb2vegMPDmSSTFIJ64zwe2MiWX55O/5IRNxPLEoi4m9FS7ArZgzELEX2N6ufrVScgLD+moYDEQfGO6XXUqNsCpiqf2IGzwiZDicjrp43Spwu4CyKdeZnlf89F+SY4jkYb9l0K7OwvVvYGpuPBQsI+O3t/Xt79l6VaaYwVaf5pOwxFPjmwmoYqdvl4CDv5XVnyIz+sz9ySvMtdrKN0Jv/TJ2Qh2MQaZqVDPIOnHuz8c+uIhn2qjPpKejnJ8RQv/421ZRfu/3K3INRBq7qI3vTA/3JyqxTWQ2MpwAZJzjw4qzeLfALqXv+WIrN/tbZJHw1luOPEW/E31Ipypri2ITRKJruyEbfASK/VmrTOYcV/dSgpyaxmGdWwQ6zQeEHgypPHIU1W/5WPg8QAPNF0uu7cy5TQHFVtdTwCeDQ4BPdMB5faCcaKoDxZ4dIEU7srGKi5hi0TyUw/5HSmry3H+e6VEXHhTw7TRrSImfwASuol1lavkZcEnGv4Moj7mIZ+/PqBwJvH+eP9d2Q0f+z2lWQ/kCl9R1IOsP4H8f51j2TUc302U/8Px/TrywCAn4L8qspyHJiC9xQvkRevqI/MjeA1em0a9bUrtO4z1FsJeWiSShDK0pQuXBmw3iGzHsye3iKsxDtT/4FGKc2BfhnzYd+58MEpZih1MaOLmaC6+s8HbwaH7awHnK4/wKx57IZsDQ4vl9jShZ2WqeDdFFGOuAE8tUmyficHPJZ9u/RcJXwm1AK3p7fX8Ekf133de7qWSsNMHPgQnGCG1NlPTccdpVX6qUB+EyZCS11NYuJk8834hOH2yT7UMzRKMquCtWor1fAH/q6RDTntMkuUH+M5R55EVdKuWBqhlx0Umd2B2p7n6izvi91X7OeRvwl3C1EBoKaDynf3hJTjKQ3ZrU4teOePAIrPy/Xk1qzIo48ELIDz7ZEz/ffIPalTc/wNSW+/h0Pogftm6YvCoVPWCyvwA4o6twJ9YKjXm8A5fizGwOfGua25gkZvn8HpVL1ZJyA7LJWKR4IUOi6989Q5/zJR15/X8piMGkItq0MKc7gcdiuqkjJilegn7c5uYmcRyRIFgEx37Ty6KMX2ljEGdOfbIi4xLETtjZ2DTr,iv:NkbvqlEf99WrgjBKF1vyl0kWxbsUcPzJmfTiiAsMUf
I=,tag:3Okc7Dkh9bATeff8i2LQjw==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmMEhOUTJMcUYvNFozRDNr
WnhJTEYzVWN4V1VXemhtWWU1SmMvUmljNFFNCk91aHhXRVBDSzhhcjIzalQ5SEpN
cTJIOGVVYWNYRGdtMm5nZUZ5Q0EzTE0KLS0tIFRMYnNGakdrSktjT2ZoNk1sN21C
YlhlTVhRdDFJUVZiMTdtVXlveWNDWE0KG7MKLp5tUCm7KpuhpmsvAWDrreBuHSEp
zyH6hY1i7jgjh020qZI32zNDHeTIJhi+mHur/jvBJhEGLMz6JYUPrg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2023-01-02T20:30:20Z"
mac: ENC[AES256_GCM,data:O3rYI2l6/VbuxOD+uigagizpMzY6SIMXlu8sT2nWIDDp/7q1OLd8xilAKtTD85jYGbqFk5bluhyMiFdjq4sA9RZAPXoYY/l9RqMSBeR/gptUPAqK5qkYL9XX1AXbWuxziXIAtJYvyQuyTYeWPMsMNkmHNb1APxDWc0quUTfphjA=,iv:Tdvt08Qm6yD22YM9p0pQ/Gxfc4RAM9m9J0mBShAJ0X4=,tag:FgQxh1qBlVsfDRDCnmyyPA==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3

View File

@@ -0,0 +1,51 @@
#!/usr/bin/env bash
set -o nounset
set -o errexit
mkdir -p ~/.ssh
cp /opt/id_rsa ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
printf -v aws_access_key_id_str %q "$AWS_ACCESS_KEY_ID"
printf -v aws_secret_access_key_str %q "$AWS_SECRET_ACCESS_KEY"
printf -v secret_domain_str %q "$SECRET_DOMAIN"
ssh -o StrictHostKeyChecking=no root@${HOSTNAME}.${SECRET_DOMAIN} "/bin/bash -s $aws_access_key_id_str $aws_secret_access_key_str $secret_domain_str" << 'EOF'
set -o nounset
set -o errexit
AWS_ACCESS_KEY_ID=$1
AWS_SECRET_ACCESS_KEY=$2
SECRET_DOMAIN=$3
config_filename="$(date "+%Y%m%d-%H%M%S").tar"
http_host=truenas.${SECRET_DOMAIN}
http_request_date=$(date -R)
http_content_type="application/x-tar"
http_filepath="truenas/$(hostname)/${config_filename}"
http_signature=$(
printf "PUT\n\n${http_content_type}\n%s\n/%s" "${http_request_date}" "${http_filepath}" \
| openssl sha1 -hmac "${AWS_SECRET_ACCESS_KEY}" -binary \
| base64
)
echo "Creating backup archive ..."
tar -cvlf /tmp/backup-${config_filename} --strip-components=2 /data/freenas-v1.db /data/pwenc_secret
echo "Upload backup to s3 bucket ..."
curl -fsSL \
-X PUT -T "/tmp/backup-${config_filename}" \
-H "Host: ${http_host}" \
-H "Date: ${http_request_date}" \
-H "Content-Type: ${http_content_type}" \
-H "Authorization: AWS ${AWS_ACCESS_KEY_ID}:${http_signature}" \
"https://truenas.${SECRET_DOMAIN}:51515/${http_filepath}"
rm /tmp/backup-*.tar
EOF

View File

@@ -0,0 +1,105 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/helmrelease_v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: truenas-certs-deploy
namespace: default
spec:
interval: 15m
chart:
spec:
chart: app-template
version: 1.2.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 3
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
controller:
type: cronjob
cronjob:
concurrencyPolicy: Forbid
schedule: "@daily"
restartPolicy: OnFailure
image:
repository: ghcr.io/auricom/kubectl
tag: 1.26.0@sha256:f512e3008d0492cbae7aac6eaccc21b13d723374715aaedd59d352d840f0229c
command: ["/bin/bash", "/app/truenas-certs-deploy.sh"]
env:
HOSTNAME: truenas
TRUENAS_HOME: /mnt/storage/home/homelab
SECRET_DOMAIN: ${SECRET_DOMAIN}
CERTS_DEPLOY_S3_ENABLED: "True"
envFrom:
- secretRef:
name: truenas-certs-deploy-secret
service:
main:
enabled: false
persistence:
config:
enabled: true
type: configMap
name: truenas-certs-deploy-configmap
subPath: truenas-certs-deploy.sh
mountPath: /app/truenas-certs-deploy.sh
defaultMode: 0775
readOnly: true
config-python:
enabled: true
type: configMap
name: truenas-certs-deploy-configmap
subPath: truenas-certs-deploy.py
mountPath: /app/truenas-certs-deploy.py
defaultMode: 0775
readOnly: true
ssh:
enabled: true
type: secret
name: truenas-certs-deploy-secret
subPath: SSH_KEY
mountPath: /opt/id_rsa
defaultMode: 0775
readOnly: true
additionalContainers:
truenas-remote-certs-deploy:
name: truenas-remote-certs-deploy
image: ghcr.io/auricom/kubectl:1.26.0@sha256:f512e3008d0492cbae7aac6eaccc21b13d723374715aaedd59d352d840f0229c
command: ["/bin/bash", "/app/truenas-certs-deploy.sh"]
env:
- name: HOSTNAME
value: truenas-remote
- name: TRUENAS_HOME
value: /mnt/vol1/home/homelab
- name: SECRET_DOMAIN
value: ${SECRET_DOMAIN}
- name: CERTS_DEPLOY_S3_ENABLED
value: "True"
envFrom:
- secretRef:
name: truenas-certs-deploy-secret
volumeMounts:
- name: config
readOnly: true
mountPath: /app/truenas-certs-deploy.sh
subPath: truenas-certs-deploy.sh
- name: config-python
readOnly: true
mountPath: /app/truenas-certs-deploy.py
subPath: truenas-certs-deploy.py
- name: ssh
readOnly: true
mountPath: /opt/id_rsa
subPath: SSH_KEY

View File

@@ -0,0 +1,17 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./helmrelease.yaml
- ./secret.sops.yaml
configMapGenerator:
- name: truenas-certs-deploy-configmap
files:
- ./truenas-certs-deploy.sh
- ./truenas-certs-deploy.py
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled

View File

@@ -0,0 +1,31 @@
apiVersion: v1
kind: Secret
metadata:
name: truenas-certs-deploy-secret
namespace: default
type: Opaque
stringData:
TRUENAS_API_KEY: ENC[AES256_GCM,data:0B0eF5hqqwDuv61BFxirXqxrIEtABYCRnHv97XiiyIEEKM2+DH/L0VknFczxEZIbdhERip30is4irI8mUhJOT9S2,iv:JlHKJhRd/UPJh354GyUftnrFBHLZLhIRGSfYbxKriCs=,tag:njMr8GG+YCjKpZvK3pFWsQ==,type:str]
PUSHOVER_API_KEY: ENC[AES256_GCM,data:cyk9BKRm/sSP9/y58+P1T6KMog+FqD/088NFgJ9E,iv:4d9NorzBh+XpvV0oAk6eC+d5adcDkoqwpg/iX1tI6J0=,tag:PAWmAMz6p6wXjTtMSBeJwQ==,type:str]
PUSHOVER_USER_KEY: ENC[AES256_GCM,data:TDSEIhc63jIoquDRBAeU987nfDHIhrmie41m5iA/,iv:3pHGEh9tJgeBr0B6DIT0sKtfedEZSXkAsFd+7oaIb2U=,tag:6SMb0MQzXfQNNlGsVbr3AA==,type:str]
SSH_KEY: ENC[AES256_GCM,data:/u/tzgpNhYeTfO3Y1V5DsXXWHcnpGCpmSQmcHmhtiQHeuWJJyCy9cObf6Hm8BQjmYPSnUwCGMC1GQM8rjHBiWDXVMK5IXiQlpd5gFxxH8LK8DjfMy8hiNLEe40dkYB044mfGJbyfBEK3AaGURVHlgfzGQiXMWbqSL9BGGr3hhvfuWyrpFxJSDC8CmJ0Swu9Fw8J6LliTwMG1LQAJUNd8ZJkV5qFpbHG0LDlYYI2ebjQspS8tsQuwsE4ewPrVeD2YDY81klcFnshHLD1fR7HHCNtEpMUfV9lstDVqm12JAFWGN0wFoxDR5C+JBZLgEtFwjq2hE6OZ858D5h+jwCYEVmrW3hWVK7aTYjoSmPxDaiZ5Ro7YcXSilMuSSi3IacS3iWQP6VXFfKDZTD4C527ZlJRgMQFKAvGb6FcJj2NhpcmMM9fDLsSF4VFsEYBkkp/GM+TZHqeIp+kOFvNcDwZwgqwgfu8VCB0ogwpvyTr8rpdoS8phH6P8hFO5Nzxx3HEV3cHIbiXFOrDFO2xzM1YdaAJs2sOvVm5+uTl/XJqg6kXLCzoe1LZAoK4MIVFCk2U2rItGN05wLPUTMopJuEUvHxrCg79AQToIZCD2v9Xl7HucyXR386yuL9VrS341Euc8bPELDwPNgJLxnsGRDp8xUN1CsvZWxcVxpw4k+jdXHZKd110WMKcfUMaYcKPxupdu/qDqvR6DhpywpBxPhgJL1/f8V7T09t2KCdUa81rwTsPVuK9B1H3Q/YYc8h99nBUZaYrIQk+WQtbgKYvzz204I7lev+lliPkie7H6umDWw3NADoOQqvF84kxAfO4jUbvTIeLeFSik6p0RNN3CdTnK5hNEdtpbk4+KuHSw6WBB9aTFcm3JkHGZsEuYVXWNoEgbIEjL17JLXm2FV0kNJil+vbQ5qcan5H7aKm1vcHgXylDGmKPU2QzSpXSSSwTMxOeAKGrPVIusT+gpqw22+YHa/kS2trz0XrPt/rXY4SDAXcjSNSzS5WcnvVX5v7DioGHo8/emYY9XEjML2iig1mxxyaP71GuWvzmITPWQHM3iJvXPywwgki1UiZqN+3WsZUi2zFrGJP/VXuL+8lCKvwZmRg0ACK+TeAenZsSSr3AdiKKbpriGHeqjjnSUvMmx6DUNIYdrxc9gNXBQ4tnNMN+pYBDj2jeiSIx2pb2derhOMGKyBCn3vfFylp2ljp7xJ39+N8fTlB7oTDQRCXmW/CR6bHd41/DeP9Hli8D0iYq0toM2oxby/eXPr/+I7wOZU23CKi/kQssxdn5XnlAGV0j7moF3ys3q7qFWesRQw1iYsS+dIvSr714u1NJXvE0nU6v5Vv64s22g+AC1FrWlsOdSo3CDLc1KctIuuFclyxI4mIekQk3iOKl/4a6XK/suOhmyzWHEKlq3LhbHZEA/maMOsKU//tX0uA+asOBLQJPtixwuQ9ZE0vpr4uL0LRJjpnYk09ktuE2YerQu6pGkMBt7uQnpWSzAlO2+3jPducXUdft9MXYM2jaK+PoUuCLUNegeqcpzF3KnGT9zDfbR15abg9nrY1Gv4cHlNN94JFxD2Z9qYBnNHBmG+Pdkq5xYDOcSmC3AV1IF+OAY+IYbb2BAUVy2JvqJal2mvGnlgOSVxtaA51VOkov8Bd0XwMUC/QDaj+CxMS/uDIDUsg6qbuw4dg+HjDVYhlnc6YElwET6LBoLkS5SIX+8W8XoVDAoTapOlrSXDo/elRW/WY4TJ4MEdW0xasjDuCxNDCpdmzbGsNUbpXQqDSz3sJvEggri0Q5ShGBJ57XCNEbXO/ZjlzBE1eN1bVjKkAbNrdj89NHpwFJiUUwiqhJmMFt1lbCdA9ihpKsuUyF7jBwnOdVnSLqvcL+U2WG6xWfJHTyoMpRFlaJJfchc3Nv3upOajk1rPFCdEK0vztAYinN8ldieKOz1bSJL8/RomSyjWJ3CeyyQYAODZn8KNf8E8YqapENbHA4Mj687NvaMxXl+sRxp+uprbZ/KDY609vEuub/46q0S5yddd3VAUrXX1x+leeRusGqXR0I9iwyaYSXpZO7fyUm1762o71fQHYgXcvco2NmDCH8AeFO0/Dc6bm8n//2g5XUg0ej+o1YZQaQ+plDjM9pHX33/BOvXDOJ7MoQMGIHadv9tS+USaLKNI45Z4tklIKGzkNh5q6JCrIGnja76ncaLGGOJpxG2tuQh7CATdSCWtDOJTLYDa84kWLH1lkXJeLkYGdTrKDpjNZnUQkggVyKisK9cSlnsUNsW+xwl2fKvuoNjm0wrBXN4MkUdLbbGBFt5CbZbyzz3f4et8twQ77TT6KJ5hORj/D3FFfM8LCDS/WQYQ9mHEZjWhtIfIRphSt192I/6hHuhHb7nl+ZO/SKoqfKN7Z32OUIR/gc6tcD+CYC02EpNgqZw9Lacq+zIAvY2m5KUbjaX/ddjI6OE7WyrBSArjtr/o1Vrto8xBZ5eHbbauFTFHN/QuadfU/VHDLUM2KQoWw1luYMTQBpyhK/ZCnOcU6hhJfWyMBY7lHBZBc44iu1ntyc+BPXL5kg9RoA832vNJAqt+1TGllnT1l6rd29+evfW7LIVLwQKVfCcR7DNX9NJYF8LDz2TCL7633udmoJC4bDCIW4mt1D5ITItat3AHSLruYyFPrUx4GwWGL4SKZTQxqpVxvj8mQC77h8WbkfH2csU6hFr7yCjVMpg7RSax942GHUPZzJnHmjgbgyNnats/ZgKjWxlVi/D7bUDxcYGOAHwjodyidGZbCiQckxkwF8brcfLM4VcgYfZ4Xe2tcMFLQML4id8WTgpnCv+jnSmOxn0IJog4SQ1qxPScKCAQL78SKCCeG67GTmcMbBPXhKTf7AQTpCIAUPSeAiR5rvNv5TZjIXzWYu6SP0Cw8+NxkENaS5RZsXtjVJTGnL3UJjVEQw9BC5qjXta2n8R8X1ELDB6qwjdDy3U5hbrdwEyjV8h5TJRe3xcYcMrKicmLfG6bhdcx1SpiCiI9M3hpc32S2Dn8FCoYFlP4tsuzT4b2VcaOZAlkLahchNbTTIMlehzJnGkXX1HwCh+c+fSD/apiONh49VPbVOAvV/rsSjn15AemL25MrP76evzIZwfrYdsZkGyq+sT2UyO8sGhu9U08bJkdC+1saQzSam75AWvV2dS7/HLefsMoezSLurhkr1jGjVsxaRXM9UCWxRYP2a4HHT6fHXdUGP9dHyEv8E/B5RpvD/HzNr8udxHdq7SgXhoEHMqWOKPtNVkXlqttlAWbcoWDnJaOBH1/50+DZYuZ9bmF+X5qk/TM2jyhQIyekLSw1quOdnfx5OM8K9ZmgUEqyJ2LibXW9yZWoth/Pjq7sFIkC8YSneWiFRk7uOFZjcfnoItHrjghW5tK7diluj97+O3p2FqtV9pqB2UhyPAeIRSmWFxhPj5dornJVIQL/Zr2Jv911HICP2G7wQnsKv54Fnp4t+9ZjhJzkTh4IFK,iv:vF3GSh82JgjFVTTkTJrxu142JQGIF1/1r9b1yfcDXG
E=,tag:rf0/VoDl2vKwL9gwepX4rg==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmMEhOUTJMcUYvNFozRDNr
WnhJTEYzVWN4V1VXemhtWWU1SmMvUmljNFFNCk91aHhXRVBDSzhhcjIzalQ5SEpN
cTJIOGVVYWNYRGdtMm5nZUZ5Q0EzTE0KLS0tIFRMYnNGakdrSktjT2ZoNk1sN21C
YlhlTVhRdDFJUVZiMTdtVXlveWNDWE0KG7MKLp5tUCm7KpuhpmsvAWDrreBuHSEp
zyH6hY1i7jgjh020qZI32zNDHeTIJhi+mHur/jvBJhEGLMz6JYUPrg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2023-01-02T20:30:02Z"
mac: ENC[AES256_GCM,data:Zl15uw3w7dLj+XWyevM6RsPBD8K7I6G4DQMROt47fcIhVxsoINl2/2r9nuOeICP7n+gQpKIX4OhZnxowUoU+YAwBPYOg6Ez3oT3DeSHXJxANA3mZ5PExd1Ius4nQNAnFJFNDI6rEF6onGQjhO1tw5bvwPqyjfBIRtsIXj9u9VZo=,iv:IXC7V/ejYG4lb2xKG1ZtnrIDqeIpzaNR8Wh/MdQ05RM=,tag:aq+3ZCRWZQtFv0U6b4G8VA==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3

View File

@@ -27,34 +27,17 @@ from datetime import datetime, timedelta
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
parser = argparse.ArgumentParser(description='Import and activate an SSL/TLS certificate into FreeNAS.')
parser.add_argument('-c', '--config', default=(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'deploy_config')), help='Path to config file, defaults to deploy_config.')
args = parser.parse_args()
API_KEY = os.getenv('CERTS_DEPLOY_API_KEY')
if os.path.isfile(args.config):
config = configparser.ConfigParser()
config.read(args.config)
deploy = config['deploy']
else:
print("Config file", args.config, "does not exist!")
exit(1)
# We'll use the API key if provided
API_KEY = deploy.get('api_key')
# Otherwise fallback to basic password authentication
USER = "root"
PASSWORD = deploy.get('password')
DOMAIN_NAME = deploy.get('cert_fqdn',socket.gethostname())
FREENAS_ADDRESS = deploy.get('connect_host','localhost')
VERIFY = deploy.getboolean('verify',fallback=False)
PRIVATEKEY_PATH = deploy.get('privkey_path',"/root/.acme.sh/" + DOMAIN_NAME + "/" + DOMAIN_NAME + ".key")
FULLCHAIN_PATH = deploy.get('fullchain_path',"/root/.acme.sh/" + DOMAIN_NAME + "/fullchain.cer")
PROTOCOL = deploy.get('protocol','http://')
PORT = deploy.get('port','80')
FTP_ENABLED = deploy.getboolean('ftp_enabled',fallback=False)
S3_ENABLED = deploy.getboolean('s3_enabled',fallback=False)
DOMAIN_NAME = socket.gethostname()
TRUENAS_ADDRESS = 'localhost'
VERIFY = False
PRIVATEKEY_PATH = os.getenv('CERTS_DEPLOY_PRIVATE_KEY_PATH')
FULLCHAIN_PATH = os.getenv('CERTS_DEPLOY_FULLCHAIN_PATH')
PROTOCOL = 'http://'
PORT = '80'
FTP_ENABLED = bool(os.getenv('CERTS_DEPLOY_FTP_ENABLED', ''))
S3_ENABLED = bool(os.getenv('CERTS_DEPLOY_S3_ENABLED', ''))
now = datetime.now()
cert = "letsencrypt-%s-%s-%s-%s" %(now.year, now.strftime('%m'), now.strftime('%d'), ''.join(c for c in now.strftime('%X') if
c.isdigit()))
@@ -69,10 +52,14 @@ if API_KEY:
session.headers.update({
'Authorization': f'Bearer {API_KEY}'
})
elif PASSWORD:
session.auth = (USER, PASSWORD)
else:
print ("Unable to authenticate. Specify 'api_key' or 'password' in the config.")
print ("Unable to authenticate. Specify 'CERTS_DEPLOY_API_KEY' in the os Env.")
exit(1)
if not PRIVATEKEY_PATH:
print ("Unable to find private key. Specify 'CERTS_DEPLOY_PRIVATE_KEY_PATH' in the os Env.")
exit(1)
if not FULLCHAIN_PATH:
print ("Unable to find private key. Specify 'CERTS_DEPLOY_FULLCHAIN_PATH' in the os Env.")
exit(1)
# Load cert/key
@@ -83,7 +70,7 @@ with open(FULLCHAIN_PATH, 'r') as file:
# Update or create certificate
r = session.post(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/',
PROTOCOL + TRUENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/',
verify=VERIFY,
data=json.dumps({
"create_type": "CERTIFICATE_CREATE_IMPORTED",
@@ -106,7 +93,7 @@ time.sleep(5)
# Download certificate list
limit = {'limit': 0} # set limit to 0 to disable paging in the event of many certificates
r = session.get(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/',
PROTOCOL + TRUENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/',
verify=VERIFY,
params=limit
)
@@ -134,7 +121,7 @@ if not new_cert_data:
# Set our cert as active
r = session.put(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/system/general/',
PROTOCOL + TRUENAS_ADDRESS + ':' + PORT + '/api/v2.0/system/general/',
verify=VERIFY,
data=json.dumps({
"ui_certificate": cert_id,
@@ -151,7 +138,7 @@ else:
if FTP_ENABLED:
# Set our cert as active for FTP plugin
r = session.put(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/ftp/',
PROTOCOL + TRUENAS_ADDRESS + ':' + PORT + '/api/v2.0/ftp/',
verify=VERIFY,
data=json.dumps({
"ssltls_certfile": cert,
@@ -168,7 +155,7 @@ if FTP_ENABLED:
if S3_ENABLED:
# Set our cert as active for S3 plugin
r = session.put(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/s3/',
PROTOCOL + TRUENAS_ADDRESS + ':' + PORT + '/api/v2.0/s3/',
verify=VERIFY,
data=json.dumps({
"certificate": cert_id,
@@ -205,7 +192,7 @@ if cert_id in cert_ids_same_san:
# Delete expired and old certificates with same SAN from freenas
for cid in (cert_ids_same_san | cert_ids_expired):
r = session.delete(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/id/' + str(cid),
PROTOCOL + TRUENAS_ADDRESS + ':' + PORT + '/api/v2.0/certificate/id/' + str(cid),
verify=VERIFY
)
@@ -224,7 +211,7 @@ for cid in (cert_ids_same_san | cert_ids_expired):
# If everything goes right, the request fails with a ConnectionError
try:
r = session.post(
PROTOCOL + FREENAS_ADDRESS + ':' + PORT + '/api/v2.0/system/general/ui_restart',
PROTOCOL + TRUENAS_ADDRESS + ':' + PORT + '/api/v2.0/system/general/ui_restart',
verify=VERIFY
)

View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
set -o nounset
set -o errexit
mkdir -p ~/.ssh
cp /opt/id_rsa ~/.ssh/id_rsa
chmod 600 ~/.ssh/id_rsa
printf -v truenas_api_key %q "$TRUENAS_API_KEY"
printf -v cert_deploy_s3_enabled_str %q "$CERTS_DEPLOY_S3_ENABLED"
printf -v pushover_api_key_str %q "$PUSHOVER_API_KEY"
printf -v pushover_user_key_str %q "$PUSHOVER_USER_KEY"
printf -v secret_domain_str %q "$SECRET_DOMAIN"
scp -o StrictHostKeyChecking=no /app/truenas-certs-deploy.py homelab@${HOSTNAME}.${SECRET_DOMAIN}:${TRUENAS_HOME}/scripts/certificates_deploy.py
ssh -o StrictHostKeyChecking=no homelab@${HOSTNAME}.${SECRET_DOMAIN} "/bin/bash -s $truenas_api_key $cert_deploy_s3_enabled_str $pushover_api_key_str $pushover_user_key_str $secret_domain_str" << 'EOF'
set -o nounset
set -o errexit
PUSHOVER_API_KEY=$3
PUSHOVER_USER_KEY=$4
SECRET_DOMAIN=$5
# Variables
TARGET=$(hostname)
DAYS=21
CERTIFICATE_PATH="${HOME}/letsencrypt/${SECRET_DOMAIN}"
CONFIG_PATH="${HOME}/scripts"
export CERTS_DEPLOY_API_KEY=$1
# Key and full chain are expected next to cert.pem
export CERTS_DEPLOY_PRIVATE_KEY_PATH="${CERTIFICATE_PATH}/key.pem"
export CERTS_DEPLOY_FULLCHAIN_PATH="${CERTIFICATE_PATH}/fullchain.pem"
export CERTS_DEPLOY_S3_ENABLED=$2
# Check if cert is older than 69 days
result=$(find ${CERTIFICATE_PATH}/cert.pem -mtime +69)
if [[ "$result" == "${CERTIFICATE_PATH}/cert.pem" ]]; then
echo "ERROR - Certificate is older than 69 days"
echo "ERROR - Verify than it has been renewed by ACME client on opnsense and that the upload automation has been executed"
curl -s \
--form-string "token=${PUSHOVER_API_KEY}" \
--form-string "user=${PUSHOVER_USER_KEY}" \
--form-string "message=Certificate on $TARGET is older than 69 days. Verify than it has been renewed by ACME client on opnsense and that the upload automation has been executed" \
https://api.pushover.net/1/messages.json
else
echo "checking if $TARGET expires in less than $DAYS days"
if ! openssl x509 -checkend $(( 24*3600*$DAYS )) -noout -in <(openssl s_client -showcerts -connect $TARGET:443 </dev/null 2>/dev/null | openssl x509 -outform PEM); then
echo "INFO - Certificate expires in less than $DAYS days"
echo "INFO - Deploying new certificate"
# Deploy certificate (truenas UI & minio)
python ${CONFIG_PATH}/certificates_deploy.py
else
echo "INFO - Certificate expires in more than $DAYS"
fi
fi
EOF

View File

@@ -0,0 +1,69 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomization_v1beta2.json
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: cluster-apps-truenas-backup
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/apps/default/truenas/backup
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
healthChecks:
- apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
name: truenas-backup
namespace: default
interval: 30m
retryInterval: 1m
timeout: 3m
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomization_v1beta2.json
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: cluster-apps-truenas-certs-deploy
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/apps/default/truenas/certs-deploy
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
healthChecks:
- apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
name: truenas-certs-deploy
namespace: default
interval: 30m
retryInterval: 1m
timeout: 3m
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomization_v1beta2.json
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: cluster-apps-truenas-minio-rclone
namespace: flux-system
labels:
substitution.flux.home.arpa/enabled: "true"
spec:
path: ./kubernetes/apps/default/truenas/minio-rclone
prune: true
sourceRef:
kind: GitRepository
name: home-ops-kubernetes
healthChecks:
- apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
name: truenas-minio-rclone
namespace: default
interval: 30m
retryInterval: 1m
timeout: 3m

View File

@@ -0,0 +1,58 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/helmrelease_v2beta1.json
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
name: truenas-minio-rclone
namespace: default
spec:
interval: 15m
chart:
spec:
chart: app-template
version: 1.2.0
sourceRef:
kind: HelmRepository
name: bjw-s
namespace: flux-system
maxHistory: 3
install:
createNamespace: true
remediation:
retries: 3
upgrade:
cleanupOnFail: true
remediation:
retries: 3
uninstall:
keepHistory: false
values:
controller:
type: cronjob
cronjob:
concurrencyPolicy: Forbid
schedule: "15 0 * * *"
restartPolicy: OnFailure
image:
repository: ghcr.io/auricom/rclone
tag: 1.60.1@sha256:c581a148e7f93a4ddb4fed421ea123028ff9761ddfc84b2e4a32f8de08af1c23
command: ["/bin/bash", "/app/minio-rclone.sh"]
service:
main:
enabled: false
persistence:
config:
enabled: true
type: configMap
name: truenas-minio-rclone-configmap
subPath: minio-rclone.sh
mountPath: /app/minio-rclone.sh
defaultMode: 0775
readOnly: true
age:
enabled: true
type: secret
name: truenas-minio-rclone-secret
subPath: AGE_KEY
mountPath: /app/age_key
readOnly: true

View File

@@ -0,0 +1,16 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- ./helmrelease.yaml
- ./secret.sops.yaml
configMapGenerator:
- name: truenas-minio-rclone-configmap
files:
- ./minio-rclone.sh
generatorOptions:
disableNameSuffixHash: true
annotations:
kustomize.toolkit.fluxcd.io/substitute: disabled

View File

@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -o nounset
set -o errexit
echo "Download rclone config file ..."
curl -fsSL \
--output "/tmp/rclone.conf.age" \
"https://raw.githubusercontent.com/auricom/dotfiles/main/private_dot_config/rclone/encrypted_private_rclone.conf.age"
echo "Decrypt rclone config file ..."
age --decrypt \
-i /app/age_key \
/tmp/rclone.conf.age > /tmp/rclone.conf
echo "Sync minio buckets with encrypted remote gdrive-homelab-backups ..."
rclone --config /tmp/rclone.conf sync minio: gdrive-homelab-backups:

View File

@@ -0,0 +1,28 @@
apiVersion: v1
kind: Secret
metadata:
name: truenas-minio-rclone-secret
namespace: default
type: Opaque
stringData:
AGE_KEY: ENC[AES256_GCM,data:4xNBIadPDtcizBd02RW/JN1KiOIwkED4NtXAvuI6hxaOOzpfWh8hC2jrn8MLej0e+yXEcODe0KCUsx4p+GQEARSqOvrFWJ96XgoC1batFUmzGk8/WGdbaGt+zXxwsAPpJeEIYElPqy/XLgu+k1xdc/vvN78+RPnRXEWoxbSXonxuy9DJg1VQVaP2V9lKnHcIlYtQaz2xtdTBhOVAyaVKJxo11ievv96ZFY7eyX2YmaBtOfmU9pNH9InYqU+L,iv:ahXvBl2CgjOxB6MmcjMXBryf+MwahtII/NTxYIFa3DQ=,tag:+AriTfQEhOrfJCRnfes/Cw==,type:str]
sops:
kms: []
gcp_kms: []
azure_kv: []
hc_vault: []
age:
- recipient: age1hhurqwmfvl9m3vh3hk8urulfzcdsrep2ax2neazqt435yhpamu3qj20asg
enc: |
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmMEhOUTJMcUYvNFozRDNr
WnhJTEYzVWN4V1VXemhtWWU1SmMvUmljNFFNCk91aHhXRVBDSzhhcjIzalQ5SEpN
cTJIOGVVYWNYRGdtMm5nZUZ5Q0EzTE0KLS0tIFRMYnNGakdrSktjT2ZoNk1sN21C
YlhlTVhRdDFJUVZiMTdtVXlveWNDWE0KG7MKLp5tUCm7KpuhpmsvAWDrreBuHSEp
zyH6hY1i7jgjh020qZI32zNDHeTIJhi+mHur/jvBJhEGLMz6JYUPrg==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2023-01-02T22:33:41Z"
mac: ENC[AES256_GCM,data:DLH8O96zF76gLpyPBoN4vJz3iFfLTlJVovM5URp1LtaN3JxlMGoldhsbeCTWK2O90TTkzAh6BB+2nWa4yEx+VL1pOD8XSYDz5qZS3EpQ5Gf4yr9qSziSg/uLuw39T2OxQkWw5FVCK1mzbF+Pw7IUIasUQFDmM2xBiuYH4M2OYyI=,iv:481eBWmOpRB74G1y4ntMqHS2+DKC0+OOtOEO8eKspfA=,tag:/Be7ik2B+Ya9k9cQH3iVZw==,type:str]
pgp: []
encrypted_regex: ^(data|stringData)$
version: 3.7.3

View File

@@ -0,0 +1,128 @@
# truenas
## truenas-backup S3 Configuration
1. Create `~/.mc/config.json`
```json
{
"version": "10",
"aliases": {
"minio": {
"url": "https://s3.<domain>",
"accessKey": "<access-key>",
"secretKey": "<secret-key>",
"api": "S3v4",
"path": "auto"
}
}
}
```
2. Create the truenas user and password
```sh
mc admin user add minio truenas <super-secret-password>
```
3. Create the truenas bucket
```sh
mc mb minio/truenas
```
4. Create `truenas-user-policy.json`
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:ListBucket",
"s3:PutObject",
"s3:GetObject",
"s3:DeleteObject"
],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::truenas/*", "arn:aws:s3:::truenas"],
"Sid": ""
}
]
}
```
5. Apply the bucket policies
```sh
mc admin policy add minio truenas-private truenas-user-policy.json
```
6. Associate private policy with the user
```sh
mc admin policy set minio truenas-private user=truenas
```
7. Create a retention policy
```sh
mc ilm add minio/truenas --expire-days "90"
```
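A quick way to sanity-check the result (a sketch, assuming the `minio` alias and the mc release used above; `mc ilm` subcommand names vary between mc versions):
```sh
# Show the truenas user and confirm the truenas-private policy is attached
mc admin user info minio truenas
# List the lifecycle (retention) rules on the bucket
mc ilm ls minio/truenas
```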
## minio-rclone S3 Configuration
1. Create `~/.mc/config.json`
```json
{
"version": "10",
"aliases": {
"minio": {
"url": "https://s3.<domain>",
"accessKey": "<access-key>",
"secretKey": "<secret-key>",
"api": "S3v4",
"path": "auto"
}
}
}
```
2. Create the rclone user and password
```sh
mc admin user add minio rclone <super-secret-password>
```
3. Create `rclone-user-policy.json`
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:ListBucket",
"s3:GetObject"
],
"Effect": "Allow",
"Resource": ["arn:aws:s3:::opnsense/*", "arn:aws:s3:::opnsense","arn:aws:s3:::truenas/*", "arn:aws:s3:::truenas"],
"Sid": ""
}
]
}
```
4. Apply the bucket policies
```sh
mc admin policy add minio rclone-private rclone-user-policy.json
```
5. Associate private policy with the user
```sh
mc admin policy set minio rclone-private user=rclone
```
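To verify the setup end to end (a sketch, assuming the `minio` alias above and the decrypted `/tmp/rclone.conf` produced by `minio-rclone.sh`; `--dry-run` only lists what would be copied):
```sh
# Confirm the rclone user and its read-only policy
mc admin user info minio rclone
mc admin policy info minio rclone-private
# Preview the sync that the CronJob performs, without copying anything
rclone --config /tmp/rclone.conf sync --dry-run minio: gdrive-homelab-backups:
```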