File: cloud-init-script.go

package info (click to toggle)
packer 1.6.6+ds1-2
  • links: PTS, VCS
  • area: main
  • in suites: bullseye
  • size: 32,016 kB
  • sloc: sh: 1,154; python: 619; makefile: 251; ruby: 205; xml: 97
file content (81 lines) | stat: -rw-r--r-- 1,928 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
// CODE GENERATED. DO NOT EDIT
package yandexexport

var (
	// CloudInitScript is the user-data shell script run on the temporary
	// export helper instance. It:
	//   1. reads the "debug" and "paths" attributes from the GCE-style
	//      metadata service (169.254.169.254, "Metadata-Flavor: Google"
	//      header), enabling `set -x` tracing when debug is "1" or "true";
	//   2. installs qemu-utils and awscli via apt-get;
	//   3. waits (up to 60 tries x 5s each) for the shared AWS credentials
	//      file at /tmp/aws-credentials and for the export disk device
	//      /dev/disk/by-id/virtio-doexport to appear;
	//   4. converts the disk to a qcow2 image (2M cluster size) with
	//      qemu-img;
	//   5. uploads the image to every whitespace-separated S3 path in
	//      "paths" against the Yandex storage endpoint
	//      (https://storage.yandexcloud.net).
	// On every exit path the Exit function uploads /var/log/syslog as
	// "<path>.exporter.log" next to each destination path.
	//
	// NOTE(review): when all uploads succeed, FAIL is never set, so the
	// final `Exit ${FAIL}` expands to a bare `exit` inside Exit() — the
	// script's status becomes that of the last log-upload command rather
	// than an explicit 0. Works in practice, but `Exit ${FAIL:-0}` would
	// be safer; fix belongs in the generator, not this generated file.
	CloudInitScript = `#!/usr/bin/env bash

GetMetadata() {
    curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/attributes/$1 2>/dev/null
}

[[ "$(GetMetadata debug)" == "1"  || "$(GetMetadata debug)" == "true" ]] && set -x

InstallPackages() {
    sudo apt-get update -qq && sudo apt-get install -y qemu-utils awscli
}

WaitFile() {
    local RETRIES=60
    while [[ ${RETRIES} -gt 0 ]]; do
        echo "Wait ${1}"
        if [ -e "${1}" ]; then
            echo "[${1}] has been found"
            return 0
        fi
        RETRIES=$((RETRIES-1))
        sleep 5
    done
    echo "[${1}] not found"
    return 1
}

PATHS=$(GetMetadata paths)
S3_ENDPOINT="https://storage.yandexcloud.net"
DISK_EXPORT_PATH="/dev/disk/by-id/virtio-doexport"
export AWS_SHARED_CREDENTIALS_FILE="/tmp/aws-credentials"
export AWS_REGION=ru-central1

Exit() {
    for i in ${PATHS}; do
        LOGDEST="${i}.exporter.log"
        echo "Uploading exporter log to ${LOGDEST}..."
        aws s3 --endpoint-url="${S3_ENDPOINT}" cp /var/log/syslog "${LOGDEST}"
    done

    exit $1
}

InstallPackages

echo "####### Export configuration #######"
echo "Export paths - ${PATHS}"
echo "####################################"

if ! WaitFile "${AWS_SHARED_CREDENTIALS_FILE}"; then
    echo "Failed wait credentials"
    Exit 1
fi
udevadm trigger || true

if ! WaitFile "${DISK_EXPORT_PATH}"; then
    echo "Failed wait attach disk"
    Exit 1
fi

echo "Dumping disk..."
if ! qemu-img convert -O qcow2 -o cluster_size=2M "${DISK_EXPORT_PATH}" disk.qcow2; then
    echo "Failed to dump disk to qcow2 image."
    Exit 1
fi

for i in ${PATHS}; do
    echo "Uploading qcow2 disk image to ${i}..."
    if ! aws s3 --endpoint-url="${S3_ENDPOINT}" cp disk.qcow2 "${i}"; then
        echo "Failed to upload image to ${i}."
        FAIL=1
    fi
done

Exit ${FAIL}
`
)