usr/bin/cloud-init

#!/usr/bin/python2
# EASY-INSTALL-ENTRY-SCRIPT: 'cloud-init==19.4','console_scripts','cloud-init'
__requires__ = 'cloud-init==19.4'
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.exit(
        load_entry_point('cloud-init==19.4', 'console_scripts', 'cloud-init')()
    )

ds-identify

#!/bin/sh
# shellcheck disable=2015,2039,2162,2166
#
# ds-identify is configured via /etc/cloud/ds-identify.cfg
# or on the kernel command line. It takes the following inputs:
#
# datasource: can specify the datasource that should be used.
#   kernel command line option: ci.datasource=<dsname> or ci.ds=<dsname>
#   example line in /etc/cloud/ds-identify.cfg:
#     datasource: Ec2
#
# policy: a string that indicates how ds-identify should operate.
#
#   The format is:
#     <mode>,found=value,maybe=value,notfound=value
#   default setting is:
#     search,found=all,maybe=all,notfound=disabled
#
#   kernel command line option: ci.di.policy=<policy>
#   example line in /etc/cloud/ds-identify.cfg:
#     policy: search,found=all,maybe=none,notfound=disabled
#
#   Mode:
#     disabled: disable cloud-init
#     enabled:  enable cloud-init.
#        ds-identify writes no config and just exits success.
#        the caller (cloud-init-generator) then enables cloud-init to
#        run just without any aid from ds-identify.
#     search:   determine which source or sources should be used
#        and write the result (datasource_list) to
#        /run/cloud-init/cloud.cfg
#     report:   basically 'dry run' for search. results are still written
#        to the file, but are namespaced under the top level key
#        'di_report'. Thus cloud-init is not affected, but can still
#        see the result.
#
#   found,maybe,notfound:
#     found: (default=all)
#        first: use the first found, do no further checking
#        all:   enable all DS_FOUND
#
#     maybe: (default=all)
#        if nothing returned 'found', then how to handle maybe.
#        no network sources are allowed to return 'maybe'.
#        all:  enable all DS_MAYBE
#        none: ignore any DS_MAYBE
#
#     notfound: (default=disabled)
#        disabled: disable cloud-init
#        enabled:  enable cloud-init
#
# ci.datasource.ec2.strict_id: (true|false|warn[,0-9])
#   if ec2 datasource does not strictly match,
#     return not_found if true
#     return maybe if false or warn*.
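# For illustration (hypothetical file name): a snippet in
# /etc/cloud/cloud.cfg.d/99-ec2.cfg such as
#   datasource:
#     Ec2:
#       strict_id: warn
# or the kernel command line parameter
#   ci.datasource.ec2.strict_id=warn
# makes a non-strict Ec2 match return 'maybe' (with a warning)
# rather than not_found.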
# set -u set -f UNAVAILABLE="unavailable" CR=" " ERROR="error" DI_ENABLED="enabled" DI_DISABLED="disabled" DI_DEBUG_LEVEL="${DEBUG_LEVEL:-1}" PATH_ROOT=${PATH_ROOT:-""} PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"} PATH_SYS_CLASS_DMI_ID=${PATH_SYS_CLASS_DMI_ID:-${PATH_ROOT}/sys/class/dmi/id} PATH_SYS_HYPERVISOR=${PATH_SYS_HYPERVISOR:-${PATH_ROOT}/sys/hypervisor} PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}" PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" PATH_PROC_UPTIME=${PATH_PROC_UPTIME:-${PATH_ROOT}/proc/uptime} PATH_ETC_CLOUD="${PATH_ETC_CLOUD:-${PATH_ROOT}/etc/cloud}" PATH_ETC_CI_CFG="${PATH_ETC_CI_CFG:-${PATH_ETC_CLOUD}/cloud.cfg}" PATH_ETC_CI_CFG_D="${PATH_ETC_CI_CFG_D:-${PATH_ETC_CI_CFG}.d}" PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result} DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" _DI_LOGGED="" # set DI_MAIN='noop' in environment to source this file with no main called. DI_MAIN=${DI_MAIN:-main} DI_BLKID_OUTPUT="" DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" DI_DMI_CHASSIS_ASSET_TAG="" DI_DMI_PRODUCT_NAME="" DI_DMI_SYS_VENDOR="" DI_DMI_PRODUCT_SERIAL="" DI_DMI_PRODUCT_UUID="" DI_FS_LABELS="" DI_FS_UUIDS="" DI_ISO9660_DEVS="" DI_KERNEL_CMDLINE="" DI_VIRT="" DI_PID_1_PRODUCT_NAME="" DI_UNAME_KERNEL_NAME="" DI_UNAME_KERNEL_RELEASE="" DI_UNAME_KERNEL_VERSION="" DI_UNAME_MACHINE="" DI_UNAME_NODENAME="" DI_UNAME_OPERATING_SYSTEM="" DI_UNAME_CMD_OUT="" DS_FOUND=0 DS_NOT_FOUND=1 DS_MAYBE=2 DI_DSNAME="" # this has to match the builtin list in cloud-init, it is what will # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" DI_ON_MAYBE="" DI_ON_NOTFOUND="" DI_EC2_STRICT_ID_DEFAULT="true" _IS_IBM_CLOUD="" error() { set -- "ERROR:" "$@"; debug 0 "$@" stderr "$@" } warn() { set -- "WARN:" "$@" debug 0 "$@" stderr "$@" } stderr() { echo "$@" 1>&2; } debug() { local lvl="$1" shift [ "$lvl" -gt "${DI_DEBUG_LEVEL}" ] && return if [ "$_DI_LOGGED" != "$DI_LOG" ]; then # first time here, open file descriptor for append case "$DI_LOG" in stderr) :;; ?*/*) if [ ! -d "${DI_LOG%/*}" ]; then mkdir -p "${DI_LOG%/*}" || { stderr "ERROR:" "cannot write to $DI_LOG" DI_LOG="stderr" } fi esac if [ "$DI_LOG" = "stderr" ]; then exec 3>&2 else ( exec 3>>"$DI_LOG" ) && exec 3>>"$DI_LOG" || { stderr "ERROR: failed writing to $DI_LOG. logging to stderr."; exec 3>&2 DI_LOG="stderr" } fi _DI_LOGGED="$DI_LOG" fi echo "$@" 1>&3 } dmi_decode() { local sys_field="$1" dmi_field="" val="" command -v dmidecode >/dev/null 2>&1 || { warn "No dmidecode program. Cannot read $sys_field." 
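        # (note: get_dmi_field only falls through to dmidecode when
        # /sys/class/dmi/id is missing entirely; a failure here leaves
        # callers with _RET="error".)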
return 1 } case "$1" in sys_vendor) dmi_field="system-manufacturer";; product_name) dmi_field="system-product-name";; product_uuid) dmi_field="system-uuid";; product_serial) dmi_field="system-serial-number";; chassis_asset_tag) dmi_field="chassis-asset-tag";; *) error "Unknown field $sys_field. Cannot call dmidecode." return 1;; esac val=$(dmidecode --quiet "--string=$dmi_field" 2>/dev/null) || return 1 _RET="$val" } get_dmi_field() { local path="${PATH_SYS_CLASS_DMI_ID}/$1" _RET="$UNAVAILABLE" if [ -d "${PATH_SYS_CLASS_DMI_ID}" ]; then if [ -f "$path" ] && [ -r "$path" ]; then read _RET < "${path}" || _RET="$ERROR" return fi # if `/sys/class/dmi/id` exists, but not the object we're looking for, # do *not* fallback to dmidecode! return fi dmi_decode "$1" || _RET="$ERROR" return } block_dev_with_label() { local p="${PATH_DEV_DISK}/by-label/$1" [ -b "$p" ] || return 1 _RET=$p return 0 } ensure_sane_path() { local t for t in /sbin /usr/sbin /bin /usr/bin; do case ":$PATH:" in *:$t:*|*:$t/:*) continue;; esac PATH="${PATH:+${PATH}:}$t" done } read_fs_info() { cached "${DI_BLKID_OUTPUT}" && return 0 # do not rely on links in /dev/disk which might not be present yet. # Note that blkid < 2.22 (centos6, trusty) do not output DEVNAME. # that means that DI_ISO9660_DEVS will not be set. if is_container; then # blkid will in a container, or at least currently in lxd # not provide useful information. DI_FS_LABELS="$UNAVAILABLE:container" DI_ISO9660_DEVS="$UNAVAILABLE:container" return fi local oifs="$IFS" line="" delim="," local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" uuids="" out=$(blkid -c /dev/null -o export) || { ret=$? error "failed running [$ret]: blkid -c /dev/null -o export" DI_FS_LABELS="$UNAVAILABLE:error" DI_ISO9660_DEVS="$UNAVAILABLE:error" return $ret } # 'set --' will collapse multiple consecutive entries in IFS for # whitespace characters (\n, tab, " ") so we cannot rely on getting # empty lines in "$@" below. # shellcheck disable=2086 { IFS="$CR"; set -- $out; IFS="$oifs"; } for line in "$@"; do case "${line}" in DEVNAME=*) [ -n "$dev" -a "$ftype" = "iso9660" ] && isodevs="${isodevs},${dev}=$label" ftype=""; dev=""; label=""; dev=${line#DEVNAME=};; LABEL=*) label="${line#LABEL=}"; labels="${labels}${line#LABEL=}${delim}";; TYPE=*) ftype=${line#TYPE=};; UUID=*) uuids="${uuids}${line#UUID=}$delim";; esac done [ -n "$dev" -a "$ftype" = "iso9660" ] && isodevs="${isodevs},${dev}=$label" DI_FS_LABELS="${labels%${delim}}" DI_FS_UUIDS="${uuids%${delim}}" DI_ISO9660_DEVS="${isodevs#,}" } cached() { [ -n "$1" ] && _RET="$1" && return || return 1 } detect_virt() { local virt="${UNAVAILABLE}" r="" out="" if [ -d /run/systemd ]; then out=$(systemd-detect-virt 2>&1) r=$? if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then virt="$out" fi elif [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then # Map FreeBSD's vm_guest names to those systemd-detect-virt that # don't match up. 
        # See
        # https://github.com/freebsd/freebsd/blob/master/sys/kern/subr_param.c#L144-L160
        # https://www.freedesktop.org/software/systemd/man/systemd-detect-virt.html
        #
        #  systemd    | kern.vm_guest
        # ------------+---------------
        #  none       | none
        #  kvm        | kvm
        #  vmware     | vmware
        #  microsoft  | hv
        #  oracle     | vbox
        #  xen        | xen
        #  parallels  | parallels
        #  bhyve      | bhyve
        #  vm-other   | generic
        out=$(sysctl -qn kern.vm_guest 2>/dev/null) && {
            case "$out" in
                hv) virt="microsoft" ;;
                vbox) virt="oracle" ;;
                generic) virt="vm-other";;
                *) virt="$out"
            esac
        }
    fi
    _RET="$virt"
}

read_virt() {
    cached "$DI_VIRT" && return 0
    detect_virt
    DI_VIRT=${_RET}
}

is_container() {
    case "${DI_VIRT}" in
        container-other|lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;;
        *) return 1;;
    esac
}

read_kernel_cmdline() {
    cached "${DI_KERNEL_CMDLINE}" && return
    local cmdline="" fpath="${PATH_PROC_CMDLINE}"
    if is_container; then
        local p1path="${PATH_PROC_1_CMDLINE}" x=""
        cmdline="${UNAVAILABLE}:container"
        if [ -f "$p1path" ] && x=$(tr '\0' ' ' < "$p1path"); then
            cmdline=$x
        fi
    elif [ -f "$fpath" ]; then
        read cmdline <"$fpath"
    else
        cmdline="${UNAVAILABLE}:no-cmdline"
    fi
    DI_KERNEL_CMDLINE="$cmdline"
}

read_dmi_chassis_asset_tag() {
    cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return
    get_dmi_field chassis_asset_tag
    DI_DMI_CHASSIS_ASSET_TAG="$_RET"
}

read_dmi_sys_vendor() {
    cached "${DI_DMI_SYS_VENDOR}" && return
    get_dmi_field sys_vendor
    DI_DMI_SYS_VENDOR="$_RET"
}

read_dmi_product_name() {
    cached "${DI_DMI_PRODUCT_NAME}" && return
    get_dmi_field product_name
    DI_DMI_PRODUCT_NAME="$_RET"
}

read_dmi_product_uuid() {
    cached "${DI_DMI_PRODUCT_UUID}" && return
    get_dmi_field product_uuid
    DI_DMI_PRODUCT_UUID="$_RET"
}

read_dmi_product_serial() {
    cached "${DI_DMI_PRODUCT_SERIAL}" && return
    get_dmi_field product_serial
    DI_DMI_PRODUCT_SERIAL="$_RET"
}

# shellcheck disable=2034
read_uname_info() {
    # run uname, and parse output.
    # uname is tricky to parse as it outputs always in a given order
    # independent of option order. kernel-version is known to have spaces.
    # 1   -s kernel-name
    # 2   -n nodename
    # 3   -r kernel-release
    # 4.. -v kernel-version(whitespace)
    # N-2 -m machine
    # N-1 -o operating-system
    cached "${DI_UNAME_CMD_OUT}" && return
    local out="${1:-}" ret=0 buf=""
    if [ -z "$out" ]; then
        out=$(uname -snrvmo) || {
            ret=$?
            error "failed reading uname with 'uname -snrvmo'"
            return $ret
        }
    fi
    # shellcheck disable=2086
    set -- $out
    DI_UNAME_KERNEL_NAME="$1"
    DI_UNAME_NODENAME="$2"
    DI_UNAME_KERNEL_RELEASE="$3"
    shift 3
    while [ $# -gt 2 ]; do
        buf="$buf $1"
        shift
    done
    DI_UNAME_KERNEL_VERSION="${buf# }"
    DI_UNAME_MACHINE="$1"
    DI_UNAME_OPERATING_SYSTEM="$2"
    DI_UNAME_CMD_OUT="$out"
    return 0
}

parse_yaml_array() {
    # parse a yaml single line array value ([1,2,3], not key: [1,2,3]).
    # supported with or without leading and closing brackets
    #   ['1'] or [1]
    #   '1', '2'
    local val="$1" oifs="$IFS" ret="" tok=""
    # i386/14.04 (dash=0.5.7-4ubuntu1): the following outputs "[foo"
    #   sh -c 'n="$1"; echo ${n#[}' -- "[foo"
    # the fix was to quote the open bracket (val=${val#"["}) (LP: #1689648)
    val=${val#"["}
    val=${val%"]"}
    # shellcheck disable=2086
    { IFS=","; set -- $val; IFS="$oifs"; }
    for tok in "$@"; do
        trim "$tok"
        unquote "$_RET"
        ret="${ret} $_RET"
    done
    _RET="${ret# }"
}

read_datasource_list() {
    cached "$DI_DSLIST" && return
    local dslist=""
    # if DI_DSNAME is set directly, then avoid parsing config.
    if [ -n "${DI_DSNAME}" ]; then
        dslist="${DI_DSNAME}"
    fi
    # LP: #1582323.
cc:{'datasource_list': ['name']} # more generically cc:[end_cc] local cb="]" ob="[" case "$DI_KERNEL_CMDLINE" in *cc:*datasource_list*) t=${DI_KERNEL_CMDLINE##*datasource_list} t=${t%%$cb*} t=${t##*$ob} parse_yaml_array "$t" dslist=${_RET} ;; esac if [ -z "$dslist" ] && check_config datasource_list; then debug 1 "$_RET_fname set datasource_list: $_RET" parse_yaml_array "$_RET" dslist=${_RET} fi if [ -z "$dslist" ]; then dslist=${DI_DSLIST_DEFAULT} debug 1 "no datasource_list found, using default: $dslist" fi DI_DSLIST=$dslist return 0 } read_pid1_product_name() { local oifs="$IFS" out="" tok="" key="" val="" product_name="${UNAVAILABLE}" cached "${DI_PID_1_PRODUCT_NAME}" && return [ -r "${PATH_PROC_1_ENVIRON}" ] || return out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}") # shellcheck disable=2086 { IFS="$CR"; set -- $out; IFS="$oifs"; } for tok in "$@"; do key=${tok%%=*} [ "$key" != "$tok" ] || continue val=${tok#*=} [ "$key" = "product_name" ] && product_name="$val" && break done DI_PID_1_PRODUCT_NAME="$product_name" } dmi_chassis_asset_tag_matches() { is_container && return 1 case "${DI_DMI_CHASSIS_ASSET_TAG}" in $1) return 0;; esac return 1 } dmi_product_name_matches() { is_container && return 1 case "${DI_DMI_PRODUCT_NAME}" in $1) return 0;; esac return 1 } dmi_product_serial_matches() { is_container && return 1 case "${DI_DMI_PRODUCT_SERIAL}" in $1) return 0;; esac return 1 } dmi_sys_vendor_is() { is_container && return 1 [ "${DI_DMI_SYS_VENDOR}" = "$1" ] } has_fs_with_uuid() { case ",${DI_FS_UUIDS}," in *,$1,*) return 0;; esac return 1 } has_fs_with_label() { # has_fs_with_label(label1[ ,label2 ..]) # return 0 if a there is a filesystem that matches any of the labels. local label="" for label in "$@"; do case ",${DI_FS_LABELS}," in *,$label,*) return 0;; esac done return 1 } nocase_equal() { # nocase_equal(a, b) # return 0 if case insenstive comparision a.lower() == b.lower() # different lengths [ "${#1}" = "${#2}" ] || return 1 # case sensitive equal [ "$1" = "$2" ] && return 0 local delim="-delim-" # shellcheck disable=2018,2019 out=$(echo "$1${delim}$2" | tr A-Z a-z) [ "${out#*${delim}}" = "${out%${delim}*}" ] } check_seed_dir() { # check_seed_dir(name, [required]) # check the seed dir /var/lib/cloud/seed/ for 'required' # required defaults to 'meta-data' local name="$1" local dir="${PATH_VAR_LIB_CLOUD}/seed/$name" [ -d "$dir" ] || return 1 shift if [ $# -eq 0 ]; then set -- meta-data fi local f="" for f in "$@"; do [ -f "$dir/$f" ] || return 1 done return 0 } check_writable_seed_dir() { # ubuntu core bind-mounts /writable/system-data/var/lib/cloud # over the top of /var/lib/cloud, but the mount might not be done yet. local wdir="/writable/system-data" [ -d "${PATH_ROOT}$wdir" ] || return 1 local sdir="${PATH_ROOT}$wdir${PATH_VAR_LIB_CLOUD#${PATH_ROOT}}" local PATH_VAR_LIB_CLOUD="$sdir" check_seed_dir "$@" } probe_floppy() { cached "${STATE_FLOPPY_PROBED}" && return "${STATE_FLOPPY_PROBED}" local fpath=/dev/floppy [ -b "$fpath" ] || { STATE_FLOPPY_PROBED=1; return 1; } modprobe --use-blacklist floppy >/dev/null 2>&1 || { STATE_FLOPPY_PROBED=1; return 1; } udevadm settle "--exit-if-exists=$fpath" || { STATE_FLOPPY_PROBED=1; return 1; } [ -b "$fpath" ] STATE_FLOPPY_PROBED=$? 
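    # (note: the 0/1 result of the test above is cached in
    # STATE_FLOPPY_PROBED, so repeated calls skip the modprobe and
    # udevadm work.)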
return "${STATE_FLOPPY_PROBED}" } dscheck_CloudStack() { is_container && return ${DS_NOT_FOUND} dmi_product_name_matches "CloudStack*" && return $DS_FOUND return $DS_NOT_FOUND } dscheck_Exoscale() { dmi_product_name_matches "Exoscale*" && return $DS_FOUND return $DS_NOT_FOUND } dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ dmi_product_name_matches "CloudSigma" && return $DS_FOUND return $DS_NOT_FOUND } check_config() { # check_config(key [,file_globs]) # somewhat hackily read through file_globs for 'key' # file_globs are expanded via path expansion and # default to /etc/cloud/cloud.cfg /etc/cloud/cloud.cfg.d/*.cfg # currently does not respect any hierarchy in searching for key. local key="$1" files="" shift if [ $# -eq 0 ]; then files="${PATH_ETC_CI_CFG} ${PATH_ETC_CI_CFG_D}/*.cfg" else files="$*" fi # shellcheck disable=2086 { set +f; set -- $files; set -f; } if [ "$1" = "$files" -a ! -f "$1" ]; then return 1 fi local fname="" line="" ret="" found=0 found_fn="" # shellcheck disable=2094 for fname in "$@"; do [ -f "$fname" ] || continue while read line; do line=${line%%#*} case "$line" in $key:\ *|$key:) ret=${line#*:}; ret=${ret# }; found=$((found+1)) found_fn="$fname";; esac done <"$fname" done if [ $found -ne 0 ]; then _RET="$ret" _RET_fname="$found_fn" return 0 fi return 1 } dscheck_MAAS() { is_container && return "${DS_NOT_FOUND}" # heuristic check for ephemeral boot environment # for maas that do not set 'ci.dsname=' in the ephemeral environment # these have iscsi root and cloud-config-url on the cmdline. local maasiqn="iqn.2004-05.com.ubuntu:maas" case "${DI_KERNEL_CMDLINE}" in *cloud-config-url=*${maasiqn}*|*${maasiqn}*cloud-config-url=*) return ${DS_FOUND} ;; esac # check config files written by maas for installed system. if check_config "MAAS"; then return "${DS_FOUND}" fi return ${DS_NOT_FOUND} } dscheck_NoCloud() { local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac case " ${DI_DMI_PRODUCT_SERIAL} " in *\ ds=nocloud*) return ${DS_FOUND};; esac for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done if has_fs_with_label $fslabel; then return ${DS_FOUND} fi return ${DS_NOT_FOUND} } is_ds_enabled() { local name="$1" pad=" ${DI_DSLIST} " [ "${pad#* $name }" != "${pad}" ] } check_configdrive_v2() { # look in /config-drive /seed/config_drive for a directory # openstack/YYYY-MM-DD format with a file meta_data.json local d="" local vlc_config_drive_path="${PATH_VAR_LIB_CLOUD}/seed/config_drive" for d in /config-drive $vlc_config_drive_path; do set +f; set -- "$d/openstack/"2???-??-??/meta_data.json; set -f; [ -f "$1" ] && return ${DS_FOUND} done # at least one cloud (softlayer) seeds config drive with only 'latest'. local lpath="openstack/latest/meta_data.json" if [ -e "$vlc_config_drive_path/$lpath" ]; then debug 1 "config drive seeded directory had only 'latest'" return ${DS_FOUND} fi local ibm_enabled=false is_ds_enabled "IBMCloud" && ibm_enabled=true debug 1 "is_ds_enabled(IBMCloud) = $ibm_enabled." [ "$ibm_enabled" = "true" ] && is_ibm_cloud && return ${DS_NOT_FOUND} if has_fs_with_label CONFIG-2 config-2; then return ${DS_FOUND} fi return ${DS_NOT_FOUND} } check_configdrive_v1() { # FIXME: this has to check any file system that is vfat... # for now, just return not found. return ${DS_NOT_FOUND} } dscheck_ConfigDrive() { local ret="" check_configdrive_v2 ret=$? 
    [ $DS_FOUND -eq $ret ] && return $ret
    check_configdrive_v1
}

dscheck_DigitalOcean() {
    dmi_sys_vendor_is DigitalOcean && return ${DS_FOUND}
    return ${DS_NOT_FOUND}
}

dscheck_OpenNebula() {
    check_seed_dir opennebula && return ${DS_FOUND}
    has_fs_with_label "CONTEXT" && return ${DS_FOUND}
    return ${DS_NOT_FOUND}
}

dscheck_RbxCloud() {
    has_fs_with_label "CLOUDMD" "cloudmd" && return ${DS_FOUND}
    return ${DS_NOT_FOUND}
}

ovf_vmware_guest_customization() {
    # vmware guest customization

    # virt provider must be vmware
    [ "${DI_VIRT}" = "vmware" ] || return 1

    # we have to have the plugin to do vmware customization
    local found="" pkg="" pre="${PATH_ROOT}/usr/lib"
    local ppath="plugins/vmsvc/libdeployPkgPlugin.so"
    for pkg in vmware-tools open-vm-tools; do
        if [ -f "$pre/$pkg/$ppath" -o -f "${pre}64/$pkg/$ppath" ]; then
            found="$pkg"; break;
        fi
    done
    [ -n "$found" ] || return 1
    # vmware customization is disabled by default
    # (disable_vmware_customization=true). If it is set to false, then
    # user has requested customization.
    local key="disable_vmware_customization"
    if check_config "$key"; then
        debug 2 "${_RET_fname} set $key to $_RET"
        case "$_RET" in
            0|false|False) return 0;;
            *) return 1;;
        esac
    fi
    return 1
}

ovf_vmware_transport_guestinfo() {
    [ "${DI_VIRT}" = "vmware" ] || return 1
    command -v vmware-rpctool >/dev/null 2>&1 || return 1
    local out="" ret=""
    out=$(vmware-rpctool "info-get guestinfo.ovfEnv" 2>&1)
    ret=$?
    if [ $ret -ne 0 ]; then
        debug 1 "Running on vmware but rpctool query returned $ret: $out"
        return 1
    fi
    case "$out" in
        "<?xml version"*) return 0;;
        *) debug 1 "guestinfo.ovfEnv had unexpected content"
            return 1;;
    esac
}

is_cdrom_ovf() {
    local dev="$1" label="$2"
    # skip devices that don't look like cdrom paths.
    case "$dev" in
        /dev/sr[0-9]|/dev/hd[a-z]) :;;
        *) debug 1 "skipping iso dev $dev"
            return 1;;
    esac

    debug 1 "got label=$label"
    # fast path known 'OVF' labels
    case "$label" in
        OVF-TRANSPORT|ovf-transport|OVFENV|ovfenv|OVF\ ENV|ovf\ env) return 0;;
    esac

    # explicitly skip known labels of other types. rd_rdfe is azure.
    case "$label" in
        config-2|CONFIG-2|rd_rdfe_*|cidata|CIDATA) return 1;;
    esac

    local idstr="http://schemas.dmtf.org/ovf/environment/1"
    grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev"
}

has_ovf_cdrom() {
    # DI_ISO9660_DEVS is <device>=label,<device>=label2
    # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
    if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
        local oifs="$IFS"
        # shellcheck disable=2086
        { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
        for tok in "$@"; do
            is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return 0
        done
    fi
    return 1
}

dscheck_OVF() {
    check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}"

    [ "${DI_VIRT}" = "none" ] && return ${DS_NOT_FOUND}

    # Azure provides ovf. Skip false positive by dis-allowing.
    is_azure_chassis && return $DS_NOT_FOUND

    ovf_vmware_transport_guestinfo && return "${DS_FOUND}"

    has_ovf_cdrom && return "${DS_FOUND}"

    ovf_vmware_guest_customization && return "${DS_FOUND}"

    return ${DS_NOT_FOUND}
}

is_azure_chassis() {
    local azure_chassis="7783-7084-3265-9085-8269-3286-77"
    dmi_chassis_asset_tag_matches "${azure_chassis}"
}

dscheck_Azure() {
    # http://paste.ubuntu.com/23630873/
    # $ grep /sr0 /run/blkid/blkid.tab
    # <device ... LABEL="rd_rdfe_stable..." TYPE="udf"> /dev/sr0 </device>
    #
    is_azure_chassis && return $DS_FOUND

    check_seed_dir azure ovf-env.xml && return ${DS_FOUND}

    [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND}

    has_fs_with_label "rd_rdfe_*" && return ${DS_FOUND}

    return ${DS_NOT_FOUND}
}

dscheck_Bigstep() {
    # bigstep is activated by presence of seed file 'url'
    [ -f "${PATH_VAR_LIB_CLOUD}/data/seed/bigstep/url" ] &&
        return ${DS_FOUND}
    return ${DS_NOT_FOUND}
}

ec2_read_strict_setting() {
    # the 'strict_id' setting for Ec2 controls behavior when
    # the platform does not identify itself directly as Ec2.
    # order of precedence is:
    #   1. builtin setting here in cloud-init/ds-identify
    #   2. ds-identify config
    #   3. system config (/etc/cloud/cloud.cfg.d/*Ec2*.cfg)
    #   4. kernel command line (undocumented)
    #   5. user-data or vendor-data (not available here)
    local default="$1" key="ci.datasource.ec2.strict_id" val=""

    # 4. kernel command line
    case " ${DI_KERNEL_CMDLINE} " in
        *\ $key=*\ )
            val=${DI_KERNEL_CMDLINE##*$key=}
            val=${val%% *};
            _RET=${val:-$default}
            return 0
    esac

    # 3.
look for the key 'strict_id' (datasource/Ec2/strict_id) # only in cloud.cfg or cloud.cfg.d/EC2.cfg (case insensitive) local cfg="${PATH_ETC_CI_CFG}" cfg_d="${PATH_ETC_CI_CFG_D}" if check_config strict_id "$cfg" "$cfg_d/*[Ee][Cc]2*.cfg"; then debug 2 "${_RET_fname} set strict_id to $_RET" return 0 fi # 2. ds-identify config (datasource.ec2.strict) local config="${PATH_DI_CONFIG}" if [ -f "$config" ]; then if _read_config "$key" < "$config"; then _RET=${_RET:-$default} return 0 fi fi # 1. Default _RET=$default return 0 } ec2_identify_platform() { local default="$1" local serial="${DI_DMI_PRODUCT_SERIAL}" case "$serial" in *.brightbox.com) _RET="Brightbox"; return 0;; esac local asset_tag="${DI_DMI_CHASSIS_ASSET_TAG}" case "$asset_tag" in *.zstack.io) _RET="ZStack"; return 0;; esac local vendor="${DI_DMI_SYS_VENDOR}" case "$vendor" in e24cloud) _RET="E24cloud"; return 0;; esac # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && [ "${uuid#ec2}" != "$uuid" ]; then _RET="AWS" return 0 fi # product uuid and product serial start with case insensitive local uuid="${DI_DMI_PRODUCT_UUID}" case "$uuid:$serial" in [Ee][Cc]2*:[Ee][Cc]2*) # both start with ec2, now check for case insenstive equal nocase_equal "$uuid" "$serial" && { _RET="AWS"; return 0; };; esac _RET="$default" return 0; } dscheck_Ec2() { check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} is_container && return ${DS_NOT_FOUND} local unknown="Unknown" platform="" if ec2_identify_platform "$unknown"; then platform="$_RET" else warn "Failed to identify ec2 platform. Using '$unknown'." platform=$unknown fi debug 1 "ec2 platform is '$platform'." if [ "$platform" != "$unknown" ]; then return $DS_FOUND fi local default="${DI_EC2_STRICT_ID_DEFAULT}" if ec2_read_strict_setting "$default"; then strict="$_RET" else debug 1 "ec2_read_strict returned non-zero: $?. using '$default'." strict="$default" fi local key="datasource/Ec2/strict_id" case "$strict" in true|false|warn|warn,[0-9]*) :;; *) warn "$key was set to invalid '$strict'. using '$default'" strict="$default";; esac _RET_excfg="datasource: {Ec2: {strict_id: \"$strict\"}}" if [ "$strict" = "true" ]; then return $DS_NOT_FOUND else return $DS_MAYBE fi } dscheck_GCE() { if dmi_product_name_matches "Google Compute Engine"; then return ${DS_FOUND} fi # product name is not guaranteed (LP: #1674861) if dmi_product_serial_matches "GoogleCloud-*"; then return ${DS_FOUND} fi return ${DS_NOT_FOUND} } dscheck_OpenStack() { # the openstack metadata http service # if there is a config drive, then do not check metadata # FIXME: if config drive not in the search list, then we should not # do this check. check_configdrive_v2 if [ $? -eq ${DS_FOUND} ]; then return ${DS_NOT_FOUND} fi local nova="OpenStack Nova" compute="OpenStack Compute" if dmi_product_name_matches "$nova"; then return ${DS_FOUND} fi if dmi_product_name_matches "$compute"; then # RDO installed nova (LP: #1675349). 
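        # (note: stock Nova reports dmi product name "OpenStack Nova";
        # some packaged installs report "OpenStack Compute" instead.)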
return ${DS_FOUND} fi if [ "${DI_PID_1_PRODUCT_NAME}" = "$nova" ]; then return ${DS_FOUND} fi if dmi_chassis_asset_tag_matches "OpenTelekomCloud"; then return ${DS_FOUND} fi # LP: #1669875 : allow identification of OpenStack by asset tag if dmi_chassis_asset_tag_matches "$nova"; then return ${DS_FOUND} fi if dmi_chassis_asset_tag_matches "$compute"; then return ${DS_FOUND} fi # LP: #1715241 : arch other than intel are not identified properly. case "$DI_UNAME_MACHINE" in i?86|x86_64) :;; *) return ${DS_MAYBE};; esac return ${DS_NOT_FOUND} } dscheck_AliYun() { check_seed_dir "AliYun" meta-data user-data && return ${DS_FOUND} if dmi_product_name_matches "Alibaba Cloud ECS"; then return $DS_FOUND fi return $DS_NOT_FOUND } dscheck_AltCloud() { # ctype: either the dmi product name, or contents of # /etc/sysconfig/cloud-info # if ctype == "vsphere" # device = device with label 'CDROM' # elif ctype == "rhev" # device = /dev/floppy # then, filesystem on that device must have # user-data.txt or deltacloud-user-data.txt local ctype="" dev="" local match_rhev="[Rr][Hh][Ee][Vv]" local match_vsphere="[Vv][Ss][Pp][Hh][Ee][Rr][Ee]" local cinfo="${PATH_ROOT}/etc/sysconfig/cloud-info" if [ -f "$cinfo" ]; then read ctype < "$cinfo" else ctype="${DI_DMI_PRODUCT_NAME}" fi case "$ctype" in ${match_rhev}) probe_floppy || return ${DS_NOT_FOUND} dev="/dev/floppy" ;; ${match_vsphere}) block_dev_with_label CDROM || return ${DS_NOT_FOUND} dev="$_RET" ;; *) return ${DS_NOT_FOUND};; esac # FIXME: need to check $dev for user-data.txt or deltacloud-user-data.txt : "$dev" return $DS_MAYBE } dscheck_SmartOS() { # joyent cloud has two virt types: kvm and container # on kvm, product name on joyent public cloud shows 'SmartDC HVM' # on the container platform, uname's version has: BrandZ virtual linux # for container, we also verify that the socketfile exists to protect # against embedded containers (lxd running on brandz) local smartdc_kver="BrandZ virtual linux" local metadata_sockfile="${PATH_ROOT}/native/.zonecontrol/metadata.sock" dmi_product_name_matches "SmartDC*" && return $DS_FOUND [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && [ -e "${metadata_sockfile}" ] && return ${DS_FOUND} return ${DS_NOT_FOUND} } dscheck_None() { return ${DS_NOT_FOUND} } dscheck_Scaleway() { if [ "${DI_DMI_SYS_VENDOR}" = "Scaleway" ]; then return $DS_FOUND fi case " ${DI_KERNEL_CMDLINE} " in *\ scaleway\ *) return ${DS_FOUND};; esac if [ -f "${PATH_ROOT}/var/run/scaleway" ]; then return ${DS_FOUND} fi return ${DS_NOT_FOUND} } dscheck_Hetzner() { dmi_sys_vendor_is Hetzner && return ${DS_FOUND} return ${DS_NOT_FOUND} } dscheck_Oracle() { local asset_tag="OracleCloud.com" dmi_chassis_asset_tag_matches "${asset_tag}" && return ${DS_FOUND} return ${DS_NOT_FOUND} } is_ibm_provisioning() { local pcfg="${PATH_ROOT}/root/provisioningConfiguration.cfg" local logf="${PATH_ROOT}/root/swinstall.log" local is_prov=false msg="config '$pcfg' did not exist." if [ -f "$pcfg" ]; then msg="config '$pcfg' exists." is_prov=true if [ -f "$logf" ]; then if [ "$logf" -nt "$PATH_PROC_1_ENVIRON" ]; then msg="$msg log '$logf' from current boot." else is_prov=false msg="$msg log '$logf' from previous boot." fi else msg="$msg log '$logf' did not exist." 
fi fi debug 2 "ibm_provisioning=$is_prov: $msg" [ "$is_prov" = "true" ] } is_ibm_cloud() { cached "${_IS_IBM_CLOUD}" && return ${_IS_IBM_CLOUD} local ret=1 if [ "$DI_VIRT" = "xen" ]; then if is_ibm_provisioning; then ret=0 elif has_fs_with_label METADATA metadata; then ret=0 elif has_fs_with_uuid 9796-932E && has_fs_with_label CONFIG-2 config-2; then ret=0 fi fi _IS_IBM_CLOUD=$ret return $ret } dscheck_IBMCloud() { if is_ibm_provisioning; then debug 1 "cloud-init disabled during provisioning on IBMCloud" return ${DS_NOT_FOUND} fi is_ibm_cloud && return ${DS_FOUND} return ${DS_NOT_FOUND} } collect_info() { read_virt read_pid1_product_name read_kernel_cmdline read_uname_info read_config read_datasource_list read_dmi_sys_vendor read_dmi_chassis_asset_tag read_dmi_product_name read_dmi_product_serial read_dmi_product_uuid read_fs_info } print_info() { collect_info _print_info } _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG" vars="$vars FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" vars="$vars DSNAME DSLIST" vars="$vars MODE ON_FOUND ON_MAYBE ON_NOTFOUND" for v in ${vars}; do eval n='${DI_'"$v"'}' echo "$v=$n" done echo "pid=$$ ppid=$PPID" is_container && echo "is_container=true" || echo "is_container=false" } write_result() { local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" pre="" { if [ "$DI_MODE" = "report" ]; then echo "di_report:" pre=" " fi for line in "$@"; do echo "${pre}$line"; done } > "$runcfg" ret=$? [ $ret -eq 0 ] || { error "failed to write to ${runcfg}" return $ret } return 0 } record_notfound() { # in report mode, report nothing was found. # if not report mode: only report the negative result. # reporting an empty list would mean cloud-init would not search # any datasources. if [ "$DI_MODE" = "report" ]; then found -- elif [ "$DI_MODE" = "search" ]; then local msg="# reporting not found result. notfound=${DI_ON_NOTFOUND}." local DI_MODE="report" found -- "$msg" fi } found() { # found(ds1, [ds2 ...], [-- [extra lines]]) local list="" ds="" while [ $# -ne 0 ]; do if [ "$1" = "--" ]; then shift break fi list="${list:+${list}, }$1" shift done if [ $# -eq 1 ] && [ -z "$1" ]; then # do not pass an empty line through. shift fi # if None is not already in the list, then add it last. case " $list " in *\ None,\ *|*\ None\ ) :;; *) list=${list:+${list}, None};; esac write_result "datasource_list: [ $list ]" "$@" return } trim() { # shellcheck disable=2048,2086 set -- $* _RET="$*" } unquote() { # remove quotes from quoted value local quote='"' tick="'" local val="$1" case "$val" in ${quote}*${quote}|${tick}*${tick}) val=${val#?}; val=${val%?};; esac _RET="$val" } _read_config() { # reads config from stdin, # if no parameters are set, modifies _rc scoped environment vars. # if keyname is provided, then returns found value of that key. local keyname="${1:-_unset}" local line="" hash="#" key="" val="" while read line; do line=${line%%${hash}*} key="${line%%:*}" # no : in the line. 
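        # (e.g. a line "datasource: NoCloud" yields key="datasource",
        # val="NoCloud"; surrounding quotes are stripped via unquote below.)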
[ "$key" = "$line" ] && continue trim "$key" key=${_RET} [ "$keyname" != "_unset" ] && [ "$keyname" != "$key" ] && continue val="${line#*:}" trim "$val" unquote "${_RET}" val=${_RET} if [ "$keyname" = "$key" ]; then _RET="$val" return 0 fi case "$key" in datasource) _rc_dsname="$val";; policy) _rc_policy="$val";; esac done if [ "$keyname" = "_unset" ]; then return 1 fi _RET="" return 0 } parse_warn() { echo "WARN: invalid value '$2' for key '$1'. Using $1=$3." 1>&2 } parse_def_policy() { local _rc_mode="" _rc_report="" _rc_found="" _rc_maybe="" _rc_notfound="" local ret="" parse_policy "$@" ret=$? _def_mode=$_rc_mode _def_report=$_rc_report _def_found=$_rc_found _def_maybe=$_rc_maybe _def_notfound=$_rc_notfound return $ret } parse_policy() { # parse_policy(policy, default) # parse a policy string. sets # _rc_mode (enabled|disabled|search|report) # _rc_report true|false # _rc_found first|all # _rc_maybe all|none # _rc_notfound enabled|disabled local def="" case "$DI_UNAME_MACHINE" in # these have dmi data i?86|x86_64) def=${DI_DEFAULT_POLICY};; # aarch64 has dmi, but not currently used (LP: #1663304) aarch64) def=${DI_DEFAULT_POLICY_NO_DMI};; *) def=${DI_DEFAULT_POLICY_NO_DMI};; esac local policy="$1" local _def_mode="" _def_report="" _def_found="" _def_maybe="" local _def_notfound="" if [ $# -eq 1 ] || [ "$2" != "-" ]; then def=${2:-${def}} parse_def_policy "$def" - fi local mode="" report="" found="" maybe="" notfound="" local oifs="$IFS" tok="" val="" # shellcheck disable=2086 { IFS=","; set -- $policy; IFS="$oifs"; } for tok in "$@"; do val=${tok#*=} case "$tok" in $DI_ENABLED|$DI_DISABLED|search|report) mode=$tok;; found=all|found=first) found=$val;; maybe=all|maybe=none) maybe=$val;; notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;; found=*) parse_warn found "$val" "${_def_found}" found=${_def_found};; maybe=*) parse_warn maybe "$val" "${_def_maybe}" maybe=${_def_maybe};; notfound=*) parse_warn notfound "$val" "${_def_notfound}" notfound=${_def_notfound};; esac done report=${report:-${_def_report:-false}} _rc_report=${report} _rc_mode=${mode:-${_def_mode}} _rc_found=${found:-${_def_found}} _rc_maybe=${maybe:-${_def_maybe}} _rc_notfound=${notfound:-${_def_notfound}} } read_config() { local config="${PATH_DI_CONFIG}" local _rc_dsname="" _rc_policy="" ret="" if [ -f "$config" ]; then _read_config < "$config" ret=$? elif [ -e "$config" ]; then error "$config exists but is not a file!" ret=1 fi local tok="" key="" val="" for tok in ${DI_KERNEL_CMDLINE}; do key=${tok%%=*} val=${tok#*=} case "$key" in ci.ds) _rc_dsname="$val";; ci.datasource) _rc_dsname="$val";; ci.di.policy) _rc_policy="$val";; esac done local _rc_mode _rc_report _rc_found _rc_maybe _rc_notfound parse_policy "${_rc_policy}" debug 1 "policy loaded: mode=${_rc_mode} report=${_rc_report}" \ "found=${_rc_found} maybe=${_rc_maybe} notfound=${_rc_notfound}" DI_MODE=${_rc_mode} DI_ON_FOUND=${_rc_found} DI_ON_MAYBE=${_rc_maybe} DI_ON_NOTFOUND=${_rc_notfound} DI_DSNAME="${_rc_dsname}" return $ret } manual_clean_and_existing() { [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ] } read_uptime() { local up _ _RET="${UNAVAILABLE}" [ -f "$PATH_PROC_UPTIME" ] && read up _ < "$PATH_PROC_UPTIME" && _RET="$up" return } _main() { local dscheck_fn="" ret_dis=1 ret_en=0 read_uptime debug 1 "[up ${_RET}s]" "ds-identify $*" collect_info if [ "$DI_LOG" = "stderr" ]; then _print_info 1>&2 else _print_info >> "$DI_LOG" fi case "$DI_MODE" in $DI_DISABLED) debug 1 "mode=$DI_DISABLED. 
returning $ret_dis" return $ret_dis ;; $DI_ENABLED) debug 1 "mode=$DI_ENABLED. returning $ret_en" return $ret_en;; search|report) :;; esac if [ -n "${DI_DSNAME}" ]; then debug 1 "datasource '$DI_DSNAME' specified." found "$DI_DSNAME" return fi if manual_clean_and_existing; then debug 1 "manual_cache_clean enabled. Not writing datasource_list." write_result "# manual_cache_clean." return fi # shellcheck disable=2086 set -- $DI_DSLIST # if there is only a single entry in $DI_DSLIST if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then debug 1 "single entry in datasource_list ($DI_DSLIST) use that." found "$@" return fi local found="" ret="" ds="" maybe="" _RET_excfg="" local exfound_cfg="" exmaybe_cfg="" for ds in ${DI_DSLIST}; do dscheck_fn="dscheck_${ds}" debug 2 "Checking for datasource '$ds' via '$dscheck_fn'" if ! type "$dscheck_fn" >/dev/null 2>&1; then warn "No check method '$dscheck_fn' for datasource '$ds'" continue fi _RET_excfg="" $dscheck_fn ret="$?" case "$ret" in $DS_FOUND) debug 1 "check for '$ds' returned found"; exfound_cfg="${exfound_cfg:+${exfound_cfg}${CR}}${_RET_excfg}" found="${found} $ds";; $DS_MAYBE) debug 1 "check for '$ds' returned maybe"; exmaybe_cfg="${exmaybe_cfg:+${exmaybe_cfg}${CR}}${_RET_excfg}" maybe="${maybe} $ds";; *) debug 2 "check for '$ds' returned not-found[$ret]";; esac done debug 2 "found=${found# } maybe=${maybe# }" # shellcheck disable=2086 set -- $found if [ $# -ne 0 ]; then if [ $# -eq 1 ]; then debug 1 "Found single datasource: $1" else # found=all debug 1 "Found $# datasources found=${DI_ON_FOUND}: $*" if [ "${DI_ON_FOUND}" = "first" ]; then set -- "$1" fi fi found "$@" -- "${exfound_cfg}" return fi # shellcheck disable=2086 set -- $maybe if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then debug 1 "$# datasources returned maybe: $*" found "$@" -- "${exmaybe_cfg}" return fi # record the empty result. record_notfound local basemsg="No ds found [mode=$DI_MODE, notfound=$DI_ON_NOTFOUND]." local msg="" ret=3 case "$DI_MODE:$DI_ON_NOTFOUND" in report:$DI_DISABLED) msg="$basemsg Would disable cloud-init [$ret_dis]" ret=$ret_en;; report:$DI_ENABLED) msg="$basemsg Would enable cloud-init [$ret_en]" ret=$ret_en;; search:$DI_DISABLED) msg="$basemsg Disabled cloud-init [$ret_dis]" ret=$ret_dis;; search:$DI_ENABLED) msg="$basemsg Enabled cloud-init [$ret_en]" ret=$ret_en;; *) error "Unexpected result";; esac debug 1 "$msg" return "$ret" } main() { local ret="" ensure_sane_path [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI" if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] && [ -f "$PATH_RUN_DI_RESULT" ]; then if read ret < "$PATH_RUN_DI_RESULT"; then if [ "$ret" = "0" ] || [ "$ret" = "1" ]; then debug 2 "used cached result $ret. pass --force to re-run." return "$ret"; fi debug 1 "previous run returned unexpected '$ret'. Re-running." else error "failed to read result from $PATH_RUN_DI_RESULT!" fi fi _main "$@" ret=$? echo "$ret" > "$PATH_RUN_DI_RESULT" read_uptime debug 1 "[up ${_RET}s]" "returning $ret" return "$ret" } noop() { : } case "${DI_MAIN}" in main|print_info|noop) "${DI_MAIN}" "$@";; *) error "unexpected value for DI_MAIN"; exit 1;; esac # vi: syntax=sh ts=4 expandtab var-lib-cloud.txt000064400000003540147221477530007763 0ustar00/var/lib/cloud has the following structure: - scripts/ per-instance/ per-boot/ per-once/ files in these directories will be run by 'run-parts' once per instance, once per boot, and once per *ever*. 
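    As an illustration (hypothetical script), an executable file at
      scripts/per-boot/50-log-boot
    containing:
      #!/bin/sh
      echo "booted at $(date -u)" >> /var/log/boots.log
    would run on every boot, while the same file placed under
    per-instance/ would run only on the first boot of each new instance.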
  - seed/
     <datasource>/
        sys-user-data
        user-data
        meta-data

    The 'seed/' directory allows you to seed a specific datasource
    For example, to seed the 'nocloud' datasource you would need to
    populate
      seed/nocloud/user-data
      seed/nocloud/meta-data

  - instance -> instances/i-abcde
    This is a symlink to the current instance/ directory
    created/updated on boot

  - instances/
      i-abcdefgh/
        scripts/          # all scripts in scripts are per-instance
        sem/
          config-puppet
          config-ssh
          set-hostname
        cloud-config.txt
        user-data.txt
        user-data.txt.i
        obj.pkl
        handlers/
        data/             # just a per-instance data location to be used
        boot-finished     # this file indicates when "boot" is finished
                          # it is created by the 'final_message' cloud-config
        datasource        # a file containing the class and string of datasource

  - sem/
      scripts.once
    These are the cloud-specific semaphores. The only thing that would
    go here are files to mark that a "per-once" script has run.

  - handlers/
    "persistent" handlers (not per-instance). Same as handlers from
    user-data, just will be cross-instance id

  - data/
    this is a persistent data location. cloud-init won't really use it,
    but something else (a handler or script could)
      previous-datasource
      previous-instance-id
      previous-hostname

to clear out the current instance's data as if to force a "new run" on
reboot do:
   ( cd /var/lib/cloud/instance && sudo rm -Rf * )

status.txt

cloud-init will keep a 'status' file up to date for other applications
wishing to use it to determine cloud-init status.

It will manage 2 files:
  status.json
  result.json

The files will be written to /var/lib/cloud/data/ .
A symlink will be created in /run/cloud-init. The link from /run is to
ensure that if the file exists, it is not stale for this boot.

status.json's format is:
 {
  'v1': {
   'init': {
     errors: []     # list of strings for each error that occurred
     start: float   # time.time() that this stage started or None
     end: float     # time.time() that this stage finished or None
   },
   'init-local': {
     'errors': [], 'start': <float>, 'end' <float>  # (same as 'init' above)
   },
   'modules-config': {
     'errors': [], 'start': <float>, 'end' <float>  # (same as 'init' above)
   },
   'modules-final': {
     'errors': [], 'start': <float>, 'end' <float>  # (same as 'init' above)
   },
   'datasource': string describing datasource found or None
   'stage': string representing stage that is currently running
            ('init', 'init-local', 'modules-final', 'modules-config', None)
            if None, then no stage is running. Reader must read the
            start/end of each of the above stages to determine the state.
 }

result.json's format is:
 {
  'v1': {
   'datasource': string describing the datasource found
   'errors': []   # list of errors reported
  }
 }

Thus, to determine if cloud-init is finished:
  fin = "/run/cloud-init/result.json"
  if os.path.exists(fin):
    ret = json.load(open(fin, "r"))
    if len(ret['v1']['errors']):
      print "Finished with errors:" + "\n".join(ret['v1']['errors'])
    else:
      print "Finished no errors"
  else:
    print "Not Finished"

examples/cloud-config-ssh-keys.txt

#cloud-config
# add each entry to ~/.ssh/authorized_keys for the configured user or the
# first user defined in the user definition directive.
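# A minimal sketch (key truncated, not a real key):
#   ssh_authorized_keys:
#     - ssh-rsa AAAAB3NzaC1yc2E... user@example
# Each list entry becomes one line of the configured user's
# ~/.ssh/authorized_keys file.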
ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies # Send pre-generated ssh private keys to the server # If these are present, they will be written to /etc/ssh and # new random keys will not be generated # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported ssh_keys: rsa_private: | -----BEGIN RSA PRIVATE KEY----- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9 luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE -----END RSA PRIVATE KEY----- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost dsa_private: | -----BEGIN DSA PRIVATE KEY----- MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv 99iziAH0KBMVbxy03Trz -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost examples/upstart-cloud-config.txt000064400000000266147221477530013214 0ustar00#upstart-job description "My test job" start on cloud-config console output task script echo "====BEGIN=======" echo "HELLO WORLD: $UPSTART_JOB" echo "=====END========" end script examples/include.txt000064400000000355147221477530010565 0ustar00#include # entries are one url per line. 
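# (illustrative) each url is fetched in order and its content is then
# processed by cloud-init as if it had been supplied directly as user-data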
comment lines beginning with '#' are allowed # urls are passed to urllib.urlopen, so the format must be supported there http://www.ubuntu.com/robots.txt http://www.w3schools.com/html/lastpage.htm examples/cloud-config-apt.txt000064400000031024147221477530012272 0ustar00# apt_pipelining (configure Acquire::http::Pipeline-Depth) # Default: disables HTTP pipelining. Certain web servers, such # as S3 do not pipeline properly (LP: #948461). # Valid options: # False/default: Disables pipelining for APT # None/Unchanged: Use OS default # Number: Set pipelining to some number (not recommended) apt_pipelining: False ## apt config via system_info: # under the 'system_info', you can customize cloud-init's interaction # with apt. # system_info: # apt_get_command: [command, argument, argument] # apt_get_upgrade_subcommand: dist-upgrade # # apt_get_command: # To specify a different 'apt-get' command, set 'apt_get_command'. # This must be a list, and the subcommand (update, upgrade) is appended to it. # default is: # ['apt-get', '--option=Dpkg::Options::=--force-confold', # '--option=Dpkg::options::=--force-unsafe-io', '--assume-yes', '--quiet'] # # apt_get_upgrade_subcommand: "dist-upgrade" # Specify a different subcommand for 'upgrade. The default is 'dist-upgrade'. # This is the subcommand that is invoked for package_upgrade. # # apt_get_wrapper: # command: eatmydata # enabled: [True, False, "auto"] # # Install additional packages on first boot # # Default: none # # if packages are specified, this apt_update will be set to true packages: ['pastebinit'] apt: # The apt config consists of two major "areas". # # On one hand there is the global configuration for the apt feature. # # On one hand (down in this file) there is the source dictionary which allows # to define various entries to be considered by apt. ############################################################################## # Section 1: global apt configuration # # The following examples number the top keys to ease identification in # discussions. # 1.1 preserve_sources_list # # Preserves the existing /etc/apt/sources.list # Default: false - do overwrite sources_list. If set to true then any # "mirrors" configuration will have no effect. # Set to true to avoid affecting sources.list. In that case only # "extra" source specifications will be written into # /etc/apt/sources.list.d/* preserve_sources_list: true # 1.2 disable_suites # # This is an empty list by default, so nothing is disabled. # # If given, those suites are removed from sources.list after all other # modifications have been made. # Suites are even disabled if no other modification was made, # but not if is preserve_sources_list is active. # There is a special alias "$RELEASE" as in the sources that will be replace # by the matching release. # # To ease configuration and improve readability the following common ubuntu # suites will be automatically mapped to their full definition. # updates => $RELEASE-updates # backports => $RELEASE-backports # security => $RELEASE-security # proposed => $RELEASE-proposed # release => $RELEASE # # There is no harm in specifying a suite to be disabled that is not found in # the source.list file (just a no-op then) # # Note: Lines don't get deleted, but disabled by being converted to a comment. # The following example disables all usual defaults except $RELEASE-security. 
# On top it disables a custom suite called "mysuite" disable_suites: [$RELEASE-updates, backports, $RELEASE, mysuite] # 1.3 primary/security archives # # Default: none - instead it is auto select based on cloud metadata # so if neither "uri" nor "search", nor "search_dns" is set (the default) # then use the mirror provided by the DataSource found. # In EC2, that means using .ec2.archive.ubuntu.com # # define a custom (e.g. localized) mirror that will be used in sources.list # and any custom sources entries for deb / deb-src lines. # # One can set primary and security mirror to different uri's # the child elements to the keys primary and secondary are equivalent primary: # arches is list of architectures the following config applies to # the special keyword "default" applies to any architecture not explicitly # listed. - arches: [amd64, i386, default] # uri is just defining the target as-is uri: http://us.archive.ubuntu.com/ubuntu # # via search one can define lists that are tried one by one. # The first with a working DNS resolution (or if it is an IP) will be # picked. That way one can keep one configuration for multiple # subenvironments that select the working one. search: - http://cool.but-sometimes-unreachable.com/ubuntu - http://us.archive.ubuntu.com/ubuntu # if no mirror is provided by uri or search but 'search_dns' is # true, then search for dns names '-mirror' in each of # - fqdn of this host per cloud metadata # - localdomain # - no domain (which would search domains listed in /etc/resolv.conf) # If there is a dns entry for -mirror, then it is assumed that # there is a distro mirror at http://-mirror./ # # That gives the cloud provider the opportunity to set mirrors of a distro # up and expose them only by creating dns entries. # # if none of that is found, then the default distro mirror is used search_dns: true # # If multiple of a category are given # 1. uri # 2. search # 3. search_dns # the first defining a valid mirror wins (in the order as defined here, # not the order as listed in the config). # - arches: [s390x, arm64] # as above, allowing to have one config for different per arch mirrors # security is optional, if not defined it is set to the same value as primary security: uri: http://security.ubuntu.com/ubuntu # If search_dns is set for security the searched pattern is: # -security-mirror # if no mirrors are specified at all, or all lookups fail it will try # to get them from the cloud datasource and if those neither provide one fall # back to: # primary: http://archive.ubuntu.com/ubuntu # security: http://security.ubuntu.com/ubuntu # 1.4 sources_list # # Provide a custom template for rendering sources.list # without one provided cloud-init uses builtin templates for # ubuntu and debian. # Within these sources.list templates you can use the following replacement # variables (all have sane Ubuntu defaults, but mirrors can be overwritten # as needed (see above)): # => $RELEASE, $MIRROR, $PRIMARY, $SECURITY sources_list: | # written by cloud-init custom template deb $MIRROR $RELEASE main restricted deb-src $MIRROR $RELEASE main restricted deb $PRIMARY $RELEASE universe restricted deb $SECURITY $RELEASE-security multiverse # 1.5 conf # # Any apt config string that will be made available to apt # see the APT.CONF(5) man page for details what can be specified conf: | # APT config APT { Get { Assume-Yes "true"; Fix-Broken "true"; }; }; # 1.6 (http_|ftp_|https_)proxy # # Proxies are the most common apt.conf option, so that for simplified use # there is a shortcut for those. 
Those get automatically translated into the # correct Acquire::*::Proxy statements. # # note: proxy actually being a short synonym to http_proxy proxy: http://[[user][:pass]@]host[:port]/ http_proxy: http://[[user][:pass]@]host[:port]/ ftp_proxy: ftp://[[user][:pass]@]host[:port]/ https_proxy: https://[[user][:pass]@]host[:port]/ # 1.7 add_apt_repo_match # # 'source' entries in apt-sources that match this python regex # expression will be passed to add-apt-repository # The following example is also the builtin default if nothing is specified add_apt_repo_match: '^[\w-]+:\w' ############################################################################## # Section 2: source list entries # # This is a dictionary (unlike most block/net which are lists) # # The key of each source entry is the filename and will be prepended by # /etc/apt/sources.list.d/ if it doesn't start with a '/'. # If it doesn't end with .list it will be appended so that apt picks up it's # configuration. # # Whenever there is no content to be written into such a file, the key is # not used as filename - yet it can still be used as index for merging # configuration. # # The values inside the entries consost of the following optional entries: # 'source': a sources.list entry (some variable replacements apply) # 'keyid': providing a key to import via shortid or fingerprint # 'key': providing a raw PGP key # 'keyserver': specify an alternate keyserver to pull keys from that # were specified by keyid # This allows merging between multiple input files than a list like: # cloud-config1 # sources: # s1: {'key': 'key1', 'source': 'source1'} # cloud-config2 # sources: # s2: {'key': 'key2'} # s1: {'keyserver': 'foo'} # This would be merged to # sources: # s1: # keyserver: foo # key: key1 # source: source1 # s2: # key: key2 # # The following examples number the subfeatures per sources entry to ease # identification in discussions. sources: curtin-dev-ppa.list: # 2.1 source # # Creates a file in /etc/apt/sources.list.d/ for the sources list entry # based on the key: "/etc/apt/sources.list.d/curtin-dev-ppa.list" source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main" # 2.2 keyid # # Importing a gpg key for a given key id. Used keyserver defaults to # keyserver.ubuntu.com keyid: F430BBA5 # GPG key ID published on a key server ignored1: # 2.3 PPA shortcut # # Setup correct apt sources.list line and Auto-Import the signing key # from LP # # See https://help.launchpad.net/Packaging/PPA for more information # this requires 'add-apt-repository'. This will create a file in # /etc/apt/sources.list.d automatically, therefore the key here is # ignored as filename in those cases. source: "ppa:curtin-dev/test-archive" # Quote the string my-repo2.list: # 2.4 replacement variables # # sources can use $MIRROR, $PRIMARY, $SECURITY and $RELEASE replacement # variables. # They will be replaced with the default or specified mirrors and the # running release. 
# The entry below would be possibly turned into: # source: deb http://archive.ubuntu.com/ubuntu xenial multiverse source: deb $MIRROR $RELEASE multiverse my-repo3.list: # this would have the same end effect as 'ppa:curtin-dev/test-archive' source: "deb http://ppa.launchpad.net/curtin-dev/test-archive/ubuntu xenial main" keyid: F430BBA5 # GPG key ID published on the key server filename: curtin-dev-ppa.list ignored2: # 2.5 key only # # this would only import the key without adding a ppa or other source spec # since this doesn't generate a source.list file the filename key is ignored keyid: F430BBA5 # GPG key ID published on a key server ignored3: # 2.6 key id alternatives # # Keyid's can also be specified via their long fingerprints keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77 ignored4: # 2.7 alternative keyservers # # One can also specify alternative keyservers to fetch keys from. keyid: B59D 5F15 97A5 04B7 E230 6DCA 0620 BBCF 0368 3F77 keyserver: pgp.mit.edu my-repo4.list: # 2.8 raw key # # The apt signing key can also be specified by providing a pgp public key # block. Providing the PGP key this way is the most robust method for # specifying a key, as it removes dependency on a remote key server. # # As with keyid's this can be specified with or without some actual source # content. key: | # The value needs to start with -----BEGIN PGP PUBLIC KEY BLOCK----- -----BEGIN PGP PUBLIC KEY BLOCK----- Version: SKS 1.0.10 mI0ESpA3UQEEALdZKVIMq0j6qWAXAyxSlF63SvPVIgxHPb9Nk0DZUixn+akqytxG4zKCONz6 qLjoBBfHnynyVLfT4ihg9an1PqxRnTO+JKQxl8NgKGz6Pon569GtAOdWNKw15XKinJTDLjnj 9y96ljJqRcpV9t/WsIcdJPcKFR5voHTEoABE2aEXABEBAAG0GUxhdW5jaHBhZCBQUEEgZm9y IEFsZXN0aWOItgQTAQIAIAUCSpA3UQIbAwYLCQgHAwIEFQIIAwQWAgMBAh4BAheAAAoJEA7H 5Qi+CcVxWZ8D/1MyYvfj3FJPZUm2Yo1zZsQ657vHI9+pPouqflWOayRR9jbiyUFIn0VdQBrP t0FwvnOFArUovUWoKAEdqR8hPy3M3APUZjl5K4cMZR/xaMQeQRZ5CHpS4DBKURKAHC0ltS5o uBJKQOZm5iltJp15cgyIkBkGe8Mx18VFyVglAZey =Y2oI -----END PGP PUBLIC KEY BLOCK----- examples/cloud-config-archive.txt000064400000000364147221477530013132 0ustar00#cloud-config-archive - type: foo/wark filename: bar content: | This is my payload hello - this is also payload - | multi line payload here - type: text/upstart-job filename: my-upstart.conf content: | whats this, yo? examples/part-handler.txt000064400000001502147221477530011516 0ustar00#part-handler # vi: syntax=python ts=4 def list_types(): # return a list of mime-types that are handled by this module return(["text/plain", "text/go-cubs-go"]) def handle_part(data,ctype,filename,payload): # data: the cloudinit object # ctype: '__begin__', '__end__', or the specific mime-type of the part # filename: the filename for the part, or dynamically generated part if # no filename is given attribute is present # payload: the content of the part (empty for begin or end) if ctype == "__begin__": print "my handler is beginning" return if ctype == "__end__": print "my handler is ending" return print "==== received ctype=%s filename=%s ====" % (ctype,filename) print payload print "==== end ctype=%s filename=%s" % (ctype, filename) examples/cloud-config-mcollective.txt000064400000004705147221477530014022 0ustar00#cloud-config # # This is an example file to automatically setup and run mcollective # when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. 
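# For illustration: each key/value under 'conf' below becomes one setting
# in mcollective's server.cfg, so this example writes a single
# plugin.stomp.host entry pointing at dbhost.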
mcollective: # Every key present in the conf object will be added to server.cfg: # key: value # # For example the configuration below will have the following key # added to server.cfg: # plugin.stomp.host: dbhost conf: plugin.stomp.host: dbhost # This will add ssl certs to mcollective # WARNING WARNING WARNING # The ec2 metadata service is a network service, and thus is readable # by non-root users on the system (ie: 'ec2metadata --user-data') # If you want security for this, please use include-once + SSL urls public-cert: | -----BEGIN CERTIFICATE----- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d -----END CERTIFICATE----- private-cert: | -----BEGIN CERTIFICATE----- MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu 1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d -----END CERTIFICATE----- examples/cloud-config-phone-home.txt000064400000000554147221477530013551 0ustar00#cloud-config # phone_home: if this dictionary is present, then the phone_home # cloud-config module will post specified data back to the given # url # default: none # phone_home: # url: http://my.foo.bar/$INSTANCE/ # post: all # tries: 10 # phone_home: url: http://my.example.com/$INSTANCE_ID/ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] examples/cloud-config-reporting.txt000064400000000625147221477530013522 0ustar00#cloud-config ## ## The following sets up 2 reporting end points. ## A 'webhook' and a 'log' type. ## It also disables the built in default 'log' reporting: smtest: type: webhook endpoint: "http://myhost:8000/" consumer_key: "ckey_foo" consumer_secret: "csecret_foo" token_key: "tkey_foo" token_secret: "tkey_foo" smlogger: type: log level: WARN log: null examples/include-once.txt000064400000000541147221477530011504 0ustar00#include-once # entries are one url per line. 
comment lines beginning with '#' are allowed # urls are passed to urllib.urlopen, so the format must be supported there # These entries will only be processed ONE TIME by cloud-init; any further # iterations won't process this file http://www.ubuntu.com/robots.txt http://www.w3schools.com/html/lastpage.htm examples/cloud-config-resolv-conf.txt000064400000001017147221477530013742 0ustar00#cloud-config # # This is an example file to automatically configure resolv.conf when the # instance boots for the first time. # # Ensure that your yaml is valid and pass this as user-data when starting # the instance. Also be sure that your cloud.cfg file includes this # configuration module in the appropriate section. # manage_resolv_conf: true resolv_conf: nameservers: ['8.8.4.4', '8.8.8.8'] searchdomains: - foo.example.com - bar.example.com domain: example.com options: rotate: true timeout: 1 examples/cloud-config-boot-cmds.txt000064400000001172147221477530013376 0ustar00#cloud-config # boot commands # default: none # this is very similar to runcmd, but commands run very early # in the boot process, only slightly after a 'boothook' would run. # bootcmd should really only be used for things that could not be # done later in the boot process. bootcmd is very much like a # boothook, but possibly more friendly. # - bootcmd will run on every boot # - the INSTANCE_ID variable will be set to the current instance id. # - you can use the 'cloud-init-per' command to help run a command only once bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com >> /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] examples/upstart-rclocal.txt000064400000000331147221477530012253 0ustar00#upstart-job description "a test upstart job" start on stopped rc RUNLEVEL=[2345] console output task script echo "====BEGIN=======" echo "HELLO RC.LOCAL LIKE WORLD: $UPSTART_JOB" echo "=====END========" end script examples/cloud-config-update-packages.txt000064400000000217147221477530014544 0ustar00#cloud-config # Upgrade the instance on first boot # (ie run apt-get upgrade) # # Default: false # Aliases: apt_upgrade package_upgrade: true examples/cloud-config-yum-repo.txt000064400000001253147221477530013264 0ustar00#cloud-config # vim: syntax=yaml # # Add yum repository configuration to the system # # The following example adds the file /etc/yum.repos.d/epel_testing.repo # which can then subsequently be used by yum for later operations. yum_repos: # The name of the repository epel-testing: # Any repository configuration options # See: man yum.conf # # This one is required! baseurl: http://download.fedoraproject.org/pub/epel/testing/5/$basearch enabled: false failovermethod: priority gpgcheck: true gpgkey: file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL name: Extra Packages for Enterprise Linux 5 - Testing examples/cloud-config-ca-certs.txt000064400000002110147221477530013204 0ustar00#cloud-config # # This is an example file to configure an instance's trusted CA certificates # system-wide for SSL/TLS trust establishment when the instance boots for the # first time. # # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. ca-certs: # If present and set to True, the 'remove-defaults' parameter will remove # all the default trusted CA certificates that are normally shipped with # Ubuntu. # This is mainly for paranoid admins - most users will not need this # functionality.
remove-defaults: true # If present, the 'trusted' parameter should contain a certificate (or list # of certificates) to add to the system as trusted CA certificates. # Pay close attention to the YAML multiline list syntax. The example shown # here is for a list of multiline certificates. trusted: - | -----BEGIN CERTIFICATE----- YOUR-ORGS-TRUSTED-CA-CERT-HERE -----END CERTIFICATE----- - | -----BEGIN CERTIFICATE----- YOUR-ORGS-TRUSTED-CA-CERT-HERE -----END CERTIFICATE----- examples/plain-ignored.txt000064400000000104147221477530011662 0ustar00#ignored Nothing will be done with this part by the UserDataHandler examples/cloud-config-mount-points.txt000064400000002727147221477530014162 0ustar00#cloud-config # set up mount points # 'mounts' contains a list of lists # the inner list are entries for an /etc/fstab line # ie : [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs_freq, fs_passno ] # # default: # mounts: # - [ ephemeral0, /mnt ] # - [ swap, none, swap, sw, 0, 0 ] # # in order to remove a previously listed mount (ie, one from defaults) # list only the fs_spec. For example, to override the default of # mounting swap: # - [ swap ] # or # - [ swap, null ] # # - if a device does not exist at the time, an entry will still be # written to /etc/fstab. # - '/dev' can be omitted for device names that begin with: xvd, sd, hd, vd # - if an entry does not have all 6 fields, they will be filled in # with values from 'mount_default_fields' below. # # Note that you should set 'nofail' (see man fstab) for volumes that may not # be attached at instance boot (or reboot). # mounts: - [ ephemeral0, /mnt, auto, "defaults,noexec" ] - [ sdc, /opt/data ] - [ xvdh, /opt/data, "auto", "defaults,nofail", "0", "0" ] - [ dd, /dev/zero ] # mount_default_fields # These values are used to fill in any entries in 'mounts' that are not # complete. This must be an array, and must have 6 fields. mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ] # swap can also be set up by the 'mounts' module # default is to not create any swap files, because 'size' is set to 0 swap: filename: /swap.img size: "auto" # or size in bytes maxsize: size in bytes examples/cloud-config-launch-index.txt000064400000001122147221477530014061 0ustar00#cloud-config # vim: syntax=yaml # # This is the configuration syntax that can be provided to have # a given set of cloud config data show up on a certain launch # index (and not other launches) by providing a key here which # will act as a filter on the instance's userdata. When # this key is left out (or non-integer) then the content # of this file will always be used for all launch-indexes # (ie the previous behavior). launch-index: 5 # Upgrade the instance on first boot # (ie run apt-get upgrade) # # Default: false # apt_upgrade: true # Other yaml keys below... # ....... # ....... examples/cloud-config-user-groups.txt000064400000013053147221477530014003 0ustar00#cloud-config # Add groups to the system # The following example adds the ubuntu group with members 'root' and 'sys' # and the empty group cloud-users. groups: - ubuntu: [root,sys] - cloud-users # Add users to the system. Users are added after groups are added. users: - default - name: foobar gecos: Foo B. Bar primary_group: foobar groups: users selinux_user: staff_u expiredate: 2012-09-01 ssh_import_id: foobar lock_passwd: false passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ - name: barfoo gecos: Bar B.
Foo sudo: ALL=(ALL) NOPASSWD:ALL groups: users, admin ssh_import_id: None lock_passwd: true ssh_authorized_keys: - - - name: cloudy gecos: Magic Cloud App Daemon User inactive: true system: true - name: fizzbuzz sudo: False ssh_authorized_keys: - - - snapuser: joe@joeuser.io - name: nosshlogins ssh_redirect_user: true # Valid Values: # name: The user's login name # gecos: The user name's real name, i.e. "Bob B. Smith" # homedir: Optional. Set to the local path you want to use. Defaults to # /home/ # primary_group: define the primary group. Defaults to a new group created # named after the user. # groups: Optional. Additional groups to add the user to. Defaults to none # selinux_user: Optional. The SELinux user for the user's login, such as # "staff_u". When this is omitted the system will select the default # SELinux user. # lock_passwd: Defaults to true. Lock the password to disable password login # inactive: Create the user as inactive # passwd: The hash -- not the password itself -- of the password you want # to use for this user. You can generate a safe hash via: # mkpasswd --method=SHA-512 --rounds=4096 # (the above command would create from stdin an SHA-512 password hash # with 4096 salt rounds) # # Please note: while the use of a hashed password is better than # plain text, the use of this feature is not ideal. Also, # using a high number of salting rounds will help, but it should # not be relied upon. # # To highlight this risk, running John the Ripper against the # example hash above, with a readily available wordlist, revealed # the true password in 12 seconds on a i7-2620QM. # # In other words, this feature is a potential security risk and is # provided for your convenience only. If you do not fully trust the # medium over which your cloud-config will be transmitted, then you # should use SSH authentication only. # # You have thus been warned. # no_create_home: When set to true, do not create home directory. # no_user_group: When set to true, do not create a group named after the user. # no_log_init: When set to true, do not initialize lastlog and faillog database. # ssh_import_id: Optional. Import SSH ids # ssh_authorized_keys: Optional. [list] Add keys to user's authorized keys file # ssh_redirect_user: Optional. [bool] Set true to block ssh logins for cloud # ssh public keys and emit a message redirecting logins to # use instead. This option only disables cloud # provided public-keys. An error will be raised if ssh_authorized_keys # or ssh_import_id is provided for the same user. # # ssh_authorized_keys. # sudo: Defaults to none. Accepts a sudo rule string, a list of sudo rule # strings or False to explicitly deny sudo usage. Examples: # # Allow a user unrestricted sudo access. # sudo: ALL=(ALL) NOPASSWD:ALL # # Adding multiple sudo rule strings. # sudo: # - ALL=(ALL) NOPASSWD:/bin/mysql # - ALL=(ALL) ALL # # Prevent sudo access for a user. # sudo: False # # Note: Please double check your syntax and make sure it is valid. # cloud-init does not parse/check the syntax of the sudo # directive. # system: Create the user as a system user. This means no home directory. # snapuser: Create a Snappy (Ubuntu-Core) user via the snap create-user # command available on Ubuntu systems. If the user has an account # on the Ubuntu SSO, specifying the email will allow snap to # request a username and any public ssh keys and will import # these into the system with username specifed by SSO account. # If 'username' is not set in SSO, then username will be the # shortname before the email domain. 
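# For illustration only -- a minimal entry combining several of the fields documented above might look like the following (every value here is hypothetical, not a recommendation): # users: # - name: jdoe # gecos: Jane Doe # primary_group: jdoe # groups: users # ssh_import_id: jdoe # lock_passwd: true # sudo: ALL=(ALL) NOPASSWD:ALL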
# # Default user creation: # # Unless you define users, you will get a 'ubuntu' user on ubuntu systems with the # legacy permission (no password sudo, locked user, etc). If however, you want # to have the 'ubuntu' user in addition to other users, you need to instruct # cloud-init that you also want the default user. To do this use the following # syntax: # users: # - default # - bob # - .... # foobar: ... # # users[0] (the first user in users) overrides the user directive. # # The 'default' user above references the distro's config: # system_info: # default_user: # name: Ubuntu # plain_text_passwd: 'ubuntu' # home: /home/ubuntu # shell: /bin/bash # lock_passwd: True # gecos: Ubuntu # groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev] examples/cloud-config-final-message.txt000064400000000374147221477530014225 0ustar00#cloud-config # final_message # default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds # this message is written by cloud-final when the system is finished # its first boot final_message: "The system is finally up, after $UPTIME seconds" examples/user-script.txt000064400000000175147221477530011422 0ustar00#!/bin/sh cat < rsyslog/config_filename ## rsyslog_dir -> rsyslog/config_dir ## rsyslog -> rsyslog/configs # rsyslog: # - "*.* @@192.158.1.1" # - content: "*.* @@192.0.2.1:10514" # filename: 01-example.conf # - content: | # *.* @@syslogd.example.com # rsyslog_filename: 20-cloud-config.conf # rsyslog_dir: /etc/rsyslog.d ## to configure rsyslog to accept remote logging on Ubuntu ## write the following into /etc/rsyslog.d/20-remote-udp.conf ## $ModLoad imudp ## $UDPServerRun 514 ## $template LogRemote,"/var/log/maas/rsyslog/%HOSTNAME%/messages" ## :fromhost-ip, !isequal, "127.0.0.1" ?LogRemote ## then: ## sudo service rsyslog restart examples/cloud-config-install-packages.txt000064400000000572147221477530014734 0ustar00#cloud-config # Install additional packages on first boot # # Default: none # # if packages are specified, this apt_update will be set to true # # packages may be supplied as a single package name or as a list # with the format [, ] wherein the specifc # package version will be installed. packages: - pwgen - pastebinit - [libpython2.7, 2.7.3-0ubuntu3.1] examples/cloud-config-chef.txt000064400000010173147221477530012415 0ustar00#cloud-config # # This is an example file to automatically install chef-client and run a # list of recipes when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. # # This example assumes the instance is 16.04 (xenial) # The default is to install from packages. 
# Key from https://packages.chef.io/chef.asc apt: sources: source1: "deb http://packages.chef.io/repos/apt/stable $RELEASE main" key: | -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1.4.12 (Darwin) Comment: GPGTools - http://gpgtools.org mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu0IENIRUYgUGFja2FnZXMg PHBhY2thZ2VzQGNoZWYuaW8+iGIEExECACIFAlQwYFECGwMGCwkIBwMCBhUIAgkK CwQWAgMBAh4BAheAAAoJEClAq6mD74JqX94An26z99XOHWpLN8ahzm7cp13t4Xid AJ9wVcgoUBzvgg91lKfv/34cmemZn7kCDQRKaQu0EAgAg7ZLCVGVTmLqBM6njZEd Zbv+mZbvwLBSomdiqddE6u3eH0X3GuwaQfQWHUVG2yedyDMiG+EMtCdEeeRebTCz SNXQ8Xvi22hRPoEsBSwWLZI8/XNg0n0f1+GEr+mOKO0BxDB2DG7DA0nnEISxwFkK OFJFebR3fRsrWjj0KjDxkhse2ddU/jVz1BY7Nf8toZmwpBmdozETMOTx3LJy1HZ/ Te9FJXJMUaB2lRyluv15MVWCKQJro4MQG/7QGcIfrIZNfAGJ32DDSjV7/YO+IpRY IL4CUBQ65suY4gYUG4jhRH6u7H1p99sdwsg5OIpBe/v2Vbc/tbwAB+eJJAp89Zeu twADBQf/ZcGoPhTGFuzbkcNRSIz+boaeWPoSxK2DyfScyCAuG41CY9+g0HIw9Sq8 DuxQvJ+vrEJjNvNE3EAEdKl/zkXMZDb1EXjGwDi845TxEMhhD1dDw2qpHqnJ2mtE WpZ7juGwA3sGhi6FapO04tIGacCfNNHmlRGipyq5ZiKIRq9mLEndlECr8cwaKgkS 0wWu+xmMZe7N5/t/TK19HXNh4tVacv0F3fYK54GUjt2FjCQV75USnmNY4KPTYLXA dzC364hEMlXpN21siIFgB04w+TXn5UF3B4FfAy5hevvr4DtV4MvMiGLu0oWjpaLC MpmrR3Ny2wkmO0h+vgri9uIP06ODWIhJBBgRAgAJBQJKaQu0AhsMAAoJEClAq6mD 74Jq4hIAoJ5KrYS8kCwj26SAGzglwggpvt3CAJ0bekyky56vNqoegB+y4PQVDv4K zA== =IxPr -----END PGP PUBLIC KEY BLOCK----- chef: # Valid values are 'gems' and 'packages' and 'omnibus' install_type: "packages" # Boolean: run 'install_type' code even if chef-client # appears already installed. force_install: false # Chef settings server_url: "https://chef.yourorg.com" # Node Name # Defaults to the instance-id if not present node_name: "your-node-name" # Environment # Defaults to '_default' if not present environment: "production" # Default validation name is chef-validator validation_name: "yourorg-validator" # if validation_cert's value is "system" then it is expected # that the file already exists on the system. 
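# For example, on a host that already has the validation key in place, a plausible alternative (a sketch, not taken from this file) to pasting the key below would simply be: # validation_cert: system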
validation_cert: | -----BEGIN RSA PRIVATE KEY----- YOUR-ORGS-VALIDATION-KEY-HERE -----END RSA PRIVATE KEY----- # A run list for a first boot json, an example (not required) run_list: - "recipe[apache2]" - "role[db]" # Specify a list of initial attributes used by the cookbooks initial_attributes: apache: prefork: maxclients: 100 keepalive: "off" # if install_type is 'omnibus', change the url to download omnibus_url: "https://www.chef.io/chef/install.sh" # if install_type is 'omnibus', pass pinned version string # to the install script omnibus_version: "12.3.0" # If encrypted data bags are used, the client needs to have a secrets file # configured to decrypt them encrypted_data_bag_secret: "/etc/chef/encrypted_data_bag_secret" # Capture all subprocess output into a logfile # Useful for troubleshooting cloud-init issues output: {all: '| tee -a /var/log/cloud-init-output.log'} examples/cloud-config-add-apt-repos.txt000064400000003052147221477530014146 0ustar00#cloud-config # Add primary apt repositories # # To add 3rd party repositories, see cloud-config-apt.txt or the # Additional apt configuration and repositories section. # # # Default: auto select based on cloud metadata # in ec2, the default is .archive.ubuntu.com # apt: # primary: # - arches [default] # uri: # use the provided mirror # search: # search the list for the first mirror. # this is currently very limited, only verifying that # the mirror is dns resolvable or an IP address # # if neither mirror is set (the default) # then use the mirror provided by the DataSource found. # In EC2, that means using .ec2.archive.ubuntu.com # # if no mirror is provided by the DataSource, but 'search_dns' is # true, then search for dns names '-mirror' in each of # - fqdn of this host per cloud metadata # - localdomain # - no domain (which would search domains listed in /etc/resolv.conf) # If there is a dns entry for -mirror, then it is assumed that there # is a distro mirror at http://-mirror./ # # That gives the cloud provider the opportunity to set mirrors of a distro # up and expose them only by creating dns entries. # # if none of that is found, then the default distro mirror is used apt: primary: - arches: [default] uri: http://us.archive.ubuntu.com/ubuntu/ # or apt: primary: - arches: [default] search: - http://local-mirror.mydomain - http://archive.ubuntu.com # or apt: primary: - arches: [default] search_dns: True examples/cloud-config-salt-minion.txt000064400000004545147221477530013750 0ustar00#cloud-config # # This is an example file to automatically setup and run a salt # minion when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. salt_minion: # conf contains all the directives to be assigned in /etc/salt/minion. conf: # Set the location of the salt master server, if the master server cannot be # resolved, then the minion will fail to start. master: salt.example.com # Salt keys are manually generated by: salt-key --gen-keys=GEN_KEYS, # where GEN_KEYS is the name of the keypair, e.g. 'minion'. The keypair # will be copied to /etc/salt/pki on the minion instance. 
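# As an illustrative sketch of the workflow: running 'salt-key --gen-keys=minion' on the master would produce minion.pub and minion.pem, whose contents would then be pasted into public_key and private_key below.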
public_key: | -----BEGIN PUBLIC KEY----- MIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEAwI4yqk1Y12zVmu9Ejlua h2FD6kjrt+N9XfGqZUUVNeRb7CA0Sj5Q6NtgoaiXuIrSea2sLda6ivqAGmtxMMrP zpf3FwsYWxBUNF7D4YeLmYjvcTbfr3bCOIRnPNXZ+4isuvvEiM02u2cO0okZSgeb dofNa1NbTLYAQr9jZZb7GPKrTO4CKy0xzBih/A+sl6dL9PNDmqXQEjyJS6PXG1Vj PvD5jpSrxuIl5Ms/+2Ro3ALgvC8dgoY/3m3csnd06afumGKv5YOGtf+bnWLhc0bf 6Sk8Q6i5t0Bl+HAULSPr+B9x/I0rN76ZnPvTj1+hJ0zTof4d0hOLx/K5OQyt7AKo 4wIBAQ== -----END PUBLIC KEY----- private_key: | -----BEGIN RSA PRIVATE KEY----- Proc-Type: 4,ENCRYPTED DEK-Info: AES-128-CBC,ECE30DBBA56E2DF06B7BC415F8870994 YQOE5HIsghqjRsxPQqiWMH/VHmyFH6xIpBcmzxzispEHwBojlvLXviwvR66YhgNw 7smwE10Ik4/cwwiHTZqCk++jPATPygBiqQkUijCWzcT9kfaxmqdP4PL+hu9g7kGC KrD2Bm8/oO08s957aThuHC1sABRcJ1V3FRzJT6Za4fwweyvHVYRnmgaDA6zH0qV8 NqBSB2hnNXKEdh6UFz9QGcrQxnRjfdIaW64zoEX7jT7gYYL7FkGXBa3XdMOA4fnl adRwLFMs0jfilisZv8oUbPdZ6J6x3o8p8LVecCF8tdZt1zkcLSIXKnoDFpHSISGs BD9aqD+E4ejynM/tPaVFq4IHzT8viN6h6WcH8fbpClFZ66Iyy9XL3/CjAY7Jzhh9 fnbc4Iq28cdbmO/vkR7JyVOgEMWe1BcSqtro70XoUNRY8uDJUPqohrhm/9AigFRA Pwyf3LqojxRnwXjHsZtGltUtEAPZzgh3fKJnx9MyRR7DPXBRig7TAHU7n2BFRhHA TYThy29bK6NkIc/cKc2kEQVo98Cr04PO8jVxZM332FlhiVlP0kpAp+tFj7aMzPTG sJumb9kPbMsgpEuTCONm3yyoufGEBFMrIJ+Po48M2RlYOh50VkO09pI+Eu7FPtVB H4gKzoJIpZZ/7vYXQ3djM8s9hc5gD5CVExTZV4drbsXt6ITiwHuxZ6CNHRBPL5AY wmF8QZz4oivv1afdSe6E6OGC3uVmX3Psn5CVq2pE8VlRDKFy1WqfU2enRAijSS2B rtJs263fOJ8ZntDzMVMPgiAlzzfA285KUletpAeUmz+peR1gNzkE0eKSG6THOCi0 rfmR8SeEzyNvin0wQ3qgYiiHjHbbFhJIMAQxoX+0hDSooM7Wo5wkLREULpGuesTg A6Fe3CiOivMDraNGA7H6Yg== -----END RSA PRIVATE KEY----- examples/part-handler-v2.txt000064400000003040147221477530012042 0ustar00#part-handler # vi: syntax=python ts=4 # this is an example of a version 2 part handler. # the differences between the initial part-handler version # and v2 is: # * handle_part receives a 5th argument, 'frequency' # frequency will be either 'always' or 'per-instance' # * handler_version must be set # # A handler declaring version 2 will be called on all instance boots, with a # different 'frequency' argument. handler_version = 2 def list_types(): # return a list of mime-types that are handled by this module return(["text/plain", "text/go-cubs-go"]) def handle_part(data,ctype,filename,payload,frequency): # data: the cloudinit object # ctype: '__begin__', '__end__', or the specific mime-type of the part # filename: the filename for the part, or dynamically generated part if # no filename is given attribute is present # payload: the content of the part (empty for begin or end) # frequency: the frequency that this cloud-init run is running for # this is either 'per-instance' or 'always'. 'per-instance' # will be invoked only on the first boot. 'always' will # will be called on subsequent boots. if ctype == "__begin__": print "my handler is beginning, frequency=%s" % frequency return if ctype == "__end__": print "my handler is ending, frequency=%s" % frequency return print "==== received ctype=%s filename=%s ====" % (ctype,filename) print payload print "==== end ctype=%s filename=%s" % (ctype, filename) examples/cloud-config-vendor-data.txt000064400000001061147221477530013710 0ustar00#cloud-config # # This explains how to control vendordata via a cloud-config # # On select Datasources, vendors have a channel for the consumptions # of all support user-data types via a special channel called # vendordata. Users of the end system are given ultimate control. 
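# For example, a user who wants vendor data ignored entirely could provide this minimal sketch instead of the settings shown below: # vendor_data: # enabled: False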
# vendor_data: enabled: True prefix: /usr/bin/ltrace # enabled: whether it is enabled or not # prefix: the command to run before any vendor scripts. # Note: this is a fairly weak method of containment. It should # be used to profile a script, not to prevent it from running examples/cloud-config-rh_subscription.txt000064400000002667147221477530014726 0ustar00#cloud-config # register your Red Hat Enterprise Linux based operating system # # this cloud-init plugin is capable of registering by username # and password *or* activation key and org. Following a successful # registration you can: # - auto-attach subscriptions # - set the service level # - add subscriptions based on pool ID # - enable yum repositories based on repo id # - disable yum repositories based on repo id # - alter the rhsm_baseurl and server-hostname in the # /etc/rhsm/rhsm.conf file rh_subscription: username: joe@foo.bar ## Quote your password if it has symbols to be safe password: '1234abcd' ## If you prefer, you can use the activation key and ## org instead of username and password. Be sure to ## comment out username and password #activation-key: foobar #org: 12345 ## Uncomment to auto-attach subscriptions to your system #auto-attach: True ## Uncomment to set the service level for your ## subscriptions #service-level: self-support ## Uncomment to add pools (needs to be a list of IDs) #add-pool: [] ## Uncomment to add or remove yum repos ## (needs to be a list of repo IDs) #enable-repo: [] #disable-repo: [] ## Uncomment to alter the baseurl in /etc/rhsm/rhsm.conf #rhsm-baseurl: http://url ## Uncomment to alter the server hostname in ## /etc/rhsm/rhsm.conf #server-hostname: foo.bar.com examples/cloud-config-update-apt.txt000064400000000374147221477530013556 0ustar00#cloud-config # Update apt database on first boot (run 'apt-get update'). # Note: if packages are given, or package_upgrade is true, then # update will be done independently of this setting. # # Default: false # Aliases: apt_update package_update: false examples/cloud-config-chef-oneiric.txt000064400000006672147221477530014044 0ustar00#cloud-config # # This is an example file to automatically install chef-client and run a # list of recipes when the instance boots for the first time. # Make sure that this file is valid yaml before starting instances. # It should be passed as user-data when starting the instance. # # This example assumes the instance is 11.10 (oneiric) # The default is to install from packages.
# Key from http://apt.opscode.com/packages@opscode.com.gpg.key apt: sources: - source: "deb http://apt.opscode.com/ $RELEASE-0.10 main" key: | -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1.4.9 (GNU/Linux) mQGiBEppC7QRBADfsOkZU6KZK+YmKw4wev5mjKJEkVGlus+NxW8wItX5sGa6kdUu twAyj7Yr92rF+ICFEP3gGU6+lGo0Nve7KxkN/1W7/m3G4zuk+ccIKmjp8KS3qn99 dxy64vcji9jIllVa+XXOGIp0G8GEaj7mbkixL/bMeGfdMlv8Gf2XPpp9vwCgn/GC JKacfnw7MpLKUHOYSlb//JsEAJqao3ViNfav83jJKEkD8cf59Y8xKia5OpZqTK5W ShVnNWS3U5IVQk10ZDH97Qn/YrK387H4CyhLE9mxPXs/ul18ioiaars/q2MEKU2I XKfV21eMLO9LYd6Ny/Kqj8o5WQK2J6+NAhSwvthZcIEphcFignIuobP+B5wNFQpe DbKfA/0WvN2OwFeWRcmmd3Hz7nHTpcnSF+4QX6yHRF/5BgxkG6IqBIACQbzPn6Hm sMtm/SVf11izmDqSsQptCrOZILfLX/mE+YOl+CwWSHhl+YsFts1WOuh1EhQD26aO Z84HuHV5HFRWjDLw9LriltBVQcXbpfSrRP5bdr7Wh8vhqJTPjrQnT3BzY29kZSBQ YWNrYWdlcyA8cGFja2FnZXNAb3BzY29kZS5jb20+iGAEExECACAFAkppC7QCGwMG CwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRApQKupg++Caj8sAKCOXmdG36gWji/K +o+XtBfvdMnFYQCfTCEWxRy2BnzLoBBFCjDSK6sJqCu5Ag0ESmkLtBAIAIO2SwlR lU5i6gTOp42RHWW7/pmW78CwUqJnYqnXROrt3h9F9xrsGkH0Fh1FRtsnncgzIhvh DLQnRHnkXm0ws0jV0PF74ttoUT6BLAUsFi2SPP1zYNJ9H9fhhK/pjijtAcQwdgxu wwNJ5xCEscBZCjhSRXm0d30bK1o49Cow8ZIbHtnXVP41c9QWOzX/LaGZsKQZnaMx EzDk8dyyctR2f03vRSVyTFGgdpUcpbr9eTFVgikCa6ODEBv+0BnCH6yGTXwBid9g w0o1e/2DviKUWCC+AlAUOubLmOIGFBuI4UR+rux9affbHcLIOTiKQXv79lW3P7W8 AAfniSQKfPWXrrcAAwUH/2XBqD4Uxhbs25HDUUiM/m6Gnlj6EsStg8n0nMggLhuN QmPfoNByMPUqvA7sULyfr6xCYzbzRNxABHSpf85FzGQ29RF4xsA4vOOU8RDIYQ9X Q8NqqR6pydprRFqWe47hsAN7BoYuhWqTtOLSBmnAnzTR5pURoqcquWYiiEavZixJ 3ZRAq/HMGioJEtMFrvsZjGXuzef7f0ytfR1zYeLVWnL9Bd32CueBlI7dhYwkFe+V Ep5jWOCj02C1wHcwt+uIRDJV6TdtbIiBYAdOMPk15+VBdweBXwMuYXr76+A7VeDL zIhi7tKFo6WiwjKZq0dzctsJJjtIfr4K4vbiD9Ojg1iISQQYEQIACQUCSmkLtAIb DAAKCRApQKupg++CauISAJ9CxYPOKhOxalBnVTLeNUkAHGg2gACeIsbobtaD4ZHG 0GLl8EkfA8uhluM= =zKAm -----END PGP PUBLIC KEY BLOCK----- chef: # 11.10 will fail if install_type is "gems" (LP: #960576) install_type: "packages" # Chef settings server_url: "https://chef.yourorg.com:4000" # Node Name # Defaults to the instance-id if not present node_name: "your-node-name" # Environment # Defaults to '_default' if not present environment: "production" # Default validation name is chef-validator validation_name: "yourorg-validator" # value of validation_cert is not used if validation_key defined, # but variable needs to be defined (LP: #960547) validation_cert: "unused" validation_key: | -----BEGIN RSA PRIVATE KEY----- YOUR-ORGS-VALIDATION-KEY-HERE -----END RSA PRIVATE KEY----- # A run list for a first boot json run_list: - "recipe[apache2]" - "role[db]" # Specify a list of initial attributes used by the cookbooks initial_attributes: apache: prefork: maxclients: 100 keepalive: "off" # Capture all subprocess output into a logfile # Useful for troubleshooting cloud-init issues output: {all: '| tee -a /var/log/cloud-init-output.log'} examples/cloud-config.txt000064400000052352147221477530011517 0ustar00#cloud-config # Update apt database on first boot # (ie run apt-get update) # # Default: true # Aliases: apt_update package_update: false # Upgrade the instance on first boot # (ie run apt-get upgrade) # # Default: false # Aliases: apt_upgrade package_upgrade: true # Reboot after package install/update if necessary # Default: false # Aliases: apt_reboot_if_required package_reboot_if_required: true # For 'apt' specific config, see cloud-config-apt.txt packages: - pwgen - pastebinit # set up mount points # 'mounts' contains a list of lists # the inner list are entries for an /etc/fstab line # ie : [ fs_spec, fs_file, fs_vfstype, fs_mntops, 
fs-freq, fs_passno ] # # default: # mounts: # - [ ephemeral0, /mnt ] # - [ swap, none, swap, sw, 0, 0 ] # # in order to remove a previously listed mount (ie, one from defaults) # list only the fs_spec. For example, to override the default, of # mounting swap: # - [ swap ] # or # - [ swap, null ] # # - if a device does not exist at the time, an entry will still be # written to /etc/fstab. # - '/dev' can be ommitted for device names that begin with: xvd, sd, hd, vd # - if an entry does not have all 6 fields, they will be filled in # with values from 'mount_default_fields' below. # # Note, that you should set 'nofail' (see man fstab) for volumes that may # not be attached at instance boot (or reboot) # mounts: - [ ephemeral0, /mnt, auto, "defaults,noexec" ] - [ sdc, /opt/data ] - [ xvdh, /opt/data, "auto", "defaults,nofail", "0", "0" ] - [ dd, /dev/zero ] # mount_default_fields # These values are used to fill in any entries in 'mounts' that are not # complete. This must be an array, and must have 7 fields. mount_default_fields: [ None, None, "auto", "defaults,nofail", "0", "2" ] # add each entry to ~/.ssh/authorized_keys for the configured user or the # first user defined in the user definition directive. ssh_authorized_keys: - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies # Send pre-generated ssh private keys to the server # If these are present, they will be written to /etc/ssh and # new random keys will not be generated # in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported ssh_keys: rsa_private: | -----BEGIN RSA PRIVATE KEY----- MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9 luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE -----END RSA PRIVATE KEY----- rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost dsa_private: | -----BEGIN DSA PRIVATE KEY----- MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC 
/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv 99iziAH0KBMVbxy03Trz -----END DSA PRIVATE KEY----- dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost # remove access to the ec2 metadata service early in boot via null route # the null route can be removed (by root) with: # route del -host 169.254.169.254 reject # default: false (service available) disable_ec2_metadata: true # run commands # default: none # runcmd contains a list of either lists or a string # each item will be executed in order at rc.local like level with # output to the console # - if the item is a list, the items will be properly executed as if # passed to execve(3) (with the first arg as the command). # - if the item is a string, it will be simply written to the file and # will be interpreted by 'sh' # # Note, that the list has to be proper yaml, so you have to escape # any characters yaml would eat (':' can be problematic) runcmd: - [ ls, -l, / ] - [ sh, -xc, "echo $(date) ': hello world!'" ] - [ sh, -c, echo "=========hello world'=========" ] - ls -l /root # Note: Don't write files to /tmp from cloud-init use /run/somedir instead. # Early boot environments can race systemd-tmpfiles-clean LP: #1707222. - mkdir /run/mydir - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ] # boot commands # default: none # this is very similar to runcmd above, but commands run very early # in the boot process, only slightly after a 'boothook' would run. # bootcmd should really only be used for things that could not be # done later in the boot process. bootcmd is very much like # boothook, but possibly with more friendly. # * bootcmd will run on every boot # * the INSTANCE_ID variable will be set to the current instance id. # * you can use 'cloud-init-per' command to help only run once bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] # cloud_config_modules: # default: # cloud_config_modules: # - mounts # - ssh # - apt-update-upgrade # - puppet # - updates-check # - disable-ec2-metadata # - runcmd # # This is an array of arrays or strings. # if item is a string, then it is read as a module name # if the item is an array it is of the form: # name, frequency, arguments # where 'frequency' is one of: # once-per-instance # always # a python file in the CloudConfig/ module directory named # cc_.py # example: cloud_config_modules: - mounts - ssh-import-id - ssh - grub-dpkg - [ apt-update-upgrade, always ] - puppet - updates-check - disable-ec2-metadata - runcmd - byobu # unverified_modules: [] # if a config module declares a set of distros as supported then it will be # skipped if running on a different distro. to override this sanity check, # provide a list of modules that should be run anyway in 'unverified_modules'. # The default is an empty list (ie, trust modules). 
# # Example: # unverified_modules: ['apt-update-upgrade'] # default: [] # ssh_import_id: [ user1, user2 ] # ssh_import_id will feed the list in that variable to # ssh-import-id, so that public keys stored in launchpad # can easily be imported for the configured user # This can be a single string ('smoser') or a list ([smoser, kirkland]) ssh_import_id: [smoser] # Provide debconf answers / debian preseed values # # See debconf-set-selections man page. # # Default: none # debconf_selections: | # Need to preserve newlines # Force debconf priority to critical. debconf debconf/priority select critical # Override default frontend to readline, but allow user to select. debconf debconf/frontend select readline debconf debconf/frontend seen false # manage byobu defaults # byobu_by_default: # 'user' or 'enable-user': set byobu 'launch-by-default' for the default user # 'system' or 'enable-system' or 'enable': # enable 'launch-by-default' for all users, do not modify default user # 'disable': disable both default user and system # 'disable-system': disable system # 'disable-user': disable for default user # not-set: no changes made byobu_by_default: system # disable ssh access as root. # if you want to be able to ssh into the system as the root user # rather than as the 'ubuntu' user, then you must set this to false # default: true disable_root: false # disable_root_opts: the value of this variable will prefix the # respective key in /root/.ssh/authorized_keys if disable_root is true # see 'man authorized_keys' for more information on what you can do here # # The string '$USER' will be replaced with the username of the default user. # The string '$DISABLE_USER' will be replaced with the username to disable. # # disable_root_opts: no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command="echo 'Please login as the user \"$USER\" rather than the user \"$DISABLE_USER\".';echo;sleep 10" # disable ssh access for non-root-users # To disable ssh access for non-root users, ssh_redirect_user: true can be # provided for any user in the 'users' list. This will reject any ssh login # attempt as that user with a message like the one in disable_root_opts, # redirecting the person to login as the default user instead # This option cannot be combined with either ssh_authorized_keys or # ssh_import_id. users: - default - name: blockeduser ssh_redirect_user: true # set the locale to a given locale # default: en_US.UTF-8 locale: en_US.UTF-8 # render template default-locale.tmpl to locale_configfile locale_configfile: /etc/default/locale # add entries to rsyslog configuration # The first occurrence of a given filename will truncate. # Subsequent entries will append. # if value is a scalar, its content is assumed to be 'content', and the # default filename is used.
# if filename is not provided, it will default to 'rsyslog_filename' # if filename does not start with a '/', it will be put in 'rsyslog_dir' # rsyslog_dir default: /etc/rsyslog.d # rsyslog_filename default: 20-cloud-config.conf rsyslog: - ':syslogtag, isequal, "[CLOUDINIT]" /var/log/cloud-foo.log' - content: "*.* @@192.0.2.1:10514" - filename: 01-examplecom.conf content: "*.* @@syslogd.example.com" # resize_rootfs: should the / filesystem be resized on first boot # this allows you to launch an instance with a larger disk / partition # and have the instance automatically grow / to accommodate it # set to 'False' to disable # by default, the resizefs is done early in boot, and blocks boot # until it completes # if resize_rootfs is set to 'noblock', then it will be run in parallel resize_rootfs: True ## hostname and /etc/hosts management # cloud-init can handle updating some entries in /etc/hosts, # and can set your hostname for you. # # if you do nothing you'll end up with: # * /etc/hostname (and `hostname`) managed via: 'preserve_hostname: false' # if you do not change /etc/hostname, it will be updated with the cloud # provided hostname on each boot. If you make a change, then manual # maintenance takes over, and cloud-init will not modify it. # # * /etc/hosts managed via: 'manage_etc_hosts: false' # cloud-init will not manage /etc/hosts at all. It is in full manual # maintenance mode. # # You can change the above behavior with the following config variables: # Remember that these can be set in cloud-config via user-data, # /etc/cloud/cloud.cfg or any file in /etc/cloud/cloud.cfg.d/ # # == Hostname management (via /etc/hostname) == # * preserve_hostname: # default: False # If this option is set to True, then /etc/hostname will never be updated # The default behavior is to update it if it has not been modified by # the user. # # * hostname: # this option will be used wherever the 'hostname' is needed # simply substitute it in the description above. # ** If you wish to set your hostname, set it here ** # default: 'hostname' as returned by the metadata service # on EC2, the hostname portion of 'local-hostname' is used # which is something like 'ip-10-244-170-199' # # * fqdn: # this option will be used wherever 'fqdn' is needed. # simply substitute it in the description above. # default: fqdn as returned by the metadata service. on EC2 'hostname' # is used, so this is like: ip-10-244-170-199.ec2.internal # # == /etc/hosts management == # # The cloud-config variable that covers management of /etc/hosts is # 'manage_etc_hosts' # # By default, its value is 'false' (boolean False) # # * manage_etc_hosts: # default: false # # false: # cloud-init will not modify /etc/hosts at all. # * Whatever is present at instance boot time will be present after boot. # * User changes will not be overwritten # # true or 'template': # on every boot, /etc/hosts will be re-written from # /etc/cloud/templates/hosts.tmpl. # The strings '$hostname' and '$fqdn' are replaced in the template # with the appropriate values. # To make modifications persist across a reboot, you must make them # in /etc/cloud/templates/hosts.tmpl # # localhost: # This option ensures that an entry is present for fqdn as described in # section 5.1.2 of the debian manual # http://www.debian.org/doc/manuals/debian-reference/ch05.en.html # # cloud-init will generally own the 127.0.1.1 entry, and will update # it to the hostname and fqdn on every boot. All other entries will # be left as is.
'ping `hostname`' will ping 127.0.1.1 # # If you want a fqdn entry with aliases other than 'hostname' to resolve # to a localhost interface, you'll need to use something other than # 127.0.1.1. For example: # 127.0.1.2 myhost.fqdn.example.com myhost whatup.example.com # final_message # default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds # this message is written by cloud-final when the system is finished # its first boot. # This message is rendered as if it were a template. If you # want jinja, you have to start the line with '## template:jinja\n' final_message: "The system is finally up, after $UPTIME seconds" # configure where output will go # 'output' entry is a dict with 'init', 'config', 'final' or 'all' # entries. Each one defines where # cloud-init, cloud-config, cloud-config-final or all output will go # each entry in the dict can be a string, list or dict. # if it is a string, it refers to stdout and stderr # if it is a list, entry 0 is stdout, entry 1 is stderr # if it is a dict, it is expected to have 'output' and 'error' fields # default is to write to console only # the special entry "&1" for an error means "same location as stdout" # (Note, that '&1' has meaning in yaml, so it must be quoted) output: init: "> /var/log/my-cloud-init.log" config: [ ">> /tmp/foo.out", "> /tmp/foo.err" ] final: output: "| tee /tmp/final.stdout | tee /tmp/bar.stdout" error: "&1" # phone_home: if this dictionary is present, then the phone_home # cloud-config module will post specified data back to the given # url # default: none # phone_home: # url: http://my.foo.bar/$INSTANCE/ # post: all # tries: 10 # phone_home: url: http://my.example.com/$INSTANCE_ID/ post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] # timezone: set the timezone for this instance # the value of 'timezone' must exist in /usr/share/zoneinfo timezone: US/Eastern # def_log_file and syslog_fix_perms work together # if # - logging is set to go to a log file 'L' both with and without syslog # - and 'L' does not exist # - and syslog is configured to write to 'L' # then 'L' will be initially created with root:root ownership (during # cloud-init), and then at cloud-config time (when syslog is available) # the syslog daemon will be unable to write to the file. # # to remedy this situation, 'def_log_file' can be set to a filename # and syslog_fix_perms to a string containing ":" # if syslog_fix_perms is a list, it will iterate through and use the # first pair that does not raise error. # # 'def_log_file' will be created with mode 'def_log_file_mode', which # is specified as a numeric value and defaults to 0600. # # the default values are '/var/log/cloud-init.log' and 'syslog:adm' # the value of 'def_log_file' should match what is configured in logging # if either is empty, then no change of ownership will be done def_log_file: /var/log/my-logging-file.log def_log_file_mode: 0600 syslog_fix_perms: syslog:root # you can set passwords for a user or multiple users # this is off by default. # to set the default user's password, use the 'password' option. # if set, to 'R' or 'RANDOM', then a random password will be # generated and written to stdout (the console) # password: passw0rd # # also note, that this will expire the password, forcing a change # on first login. If you do not want to expire, see 'chpasswd' below. # # By default in the UEC images password authentication is disabled # Thus, simply setting 'password' as above will only allow you to login # via the console. 
# # in order to enable password login via ssh you must set # 'ssh_pwauth'. # If it is set to 'True' or 'False', then sshd_config will be updated # to ensure the desired function. If not set, or set to '' or 'unchanged' # then sshd_config will not be updated. # ssh_pwauth: True # # there is also an option to set multiple users' passwords, using 'chpasswd' # That looks like the following, with 'expire' set to 'True' by default. # to not expire users' passwords, set 'expire' to 'False'. It is also # possible to set a hashed password; here account 'user3' has its password # set to 'cloud-init', hashed with SHA-256: # chpasswd: # list: | # user1:password1 # user2:RANDOM # user3:$5$eriogqzq$Dg7PxHsKGzziuEGkZgkLvacjuEFeljJ.rLf.hZqKQLA # expire: True # ssh_pwauth: [ True, False, "" or "unchanged" ] # # Hashed passwords can be generated in multiple ways, example with python3: # python3 -c 'import crypt,getpass; print(crypt.crypt(getpass.getpass(), crypt.mksalt(crypt.METHOD_SHA512)))' # Newer versions of 'mkpasswd' will also work: mkpasswd -m sha-512 password # # So, a simple working example that allows login via ssh, and does not # expire the default user's password, would look like: password: passw0rd chpasswd: { expire: False } ssh_pwauth: True # manual cache clean. # By default, the link from /var/lib/cloud/instance to # the specific instance in /var/lib/cloud/instances/ is removed on every # boot. The cloud-init code then searches for a DataSource on every boot. # If your DataSource will not be present on every boot, then you can set # this option to 'True', and maintain (remove) that link before the image # will be booted as a new instance. # default is False manual_cache_clean: False # When cloud-init is finished running, including having run # cloud_init_modules, then it will run this command. The default # is to emit an upstart signal as shown below. If the value is a # list, it will be passed to Popen. If it is a string, it will be # invoked through 'sh -c'. # # default value: # cc_ready_cmd: [ initctl, emit, cloud-config, CLOUD_CFG=/var/lib/instance//cloud-config.txt ] # example: # cc_ready_cmd: [ sh, -c, 'echo HI MOM > /tmp/file' ] ## configure interaction with ssh server # ssh_svcname: ssh # set the service name used with 'service restart' # in order to restart the ssh daemon. For fedora, use 'sshd' # default: ssh # ssh_deletekeys: True # boolean indicating if existing ssh keys should be deleted on a # per-instance basis. On a public image, this should absolutely be set # to 'True' # ssh_genkeytypes: ['rsa', 'dsa', 'ecdsa'] # a list of the ssh key types that should be generated # These are passed to 'ssh-keygen -t' ## configuration of ssh keys output to console # ssh_fp_console_blacklist: [] # ssh_key_console_blacklist: [ssh-dss] # A list of key types (first token of a /etc/ssh/ssh_key_*.pub file) # that should be skipped when outputting key fingerprints and keys # to the console respectively. ## poweroff or reboot system after finished # default: none # # power_state can be used to make the system shutdown, reboot or # halt after boot is finished. This same thing can be achieved by # user-data scripts or by runcmd by simply invoking 'shutdown'. # # Doing it this way ensures that cloud-init is entirely finished with # modules that would be executed, and avoids any error/log messages # that may go to the console as a result of system services like # syslog being taken down while cloud-init is running. # # delay: form accepted by shutdown. default is 'now'.
Another accepted format # is +m (m in minutes) # mode: required. must be one of 'poweroff', 'halt', 'reboot' # message: provided as the message argument to 'shutdown'. default is none. power_state: delay: 30 mode: poweroff message: Bye Bye examples/kernel-cmdline.txt000064400000001507147221477530012033 0ustar00cloud-config can be provided via the kernel command line. Configuration that comes from the kernel command line has higher priority than configuration in /etc/cloud/cloud.cfg The format is: cc: [end_cc] cloud-config will consider any content after 'cc:' to be cloud-config data. If an 'end_cc' string is present, then it will stop reading there. Otherwise it considers everything after 'cc:' to be cloud-config content. In order to allow carriage returns, you must enter '\\n' literally on the command line: two backslashes followed by the letter 'n'. Here are some examples: root=/dev/sda1 cc: ssh_import_id: [smoser, kirkland]\\n root=LABEL=uec-rootfs cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc root=/dev/sda1 examples/cloud-config-growpart.txt000064400000001724147221477530013357 0ustar00#cloud-config # # growpart entry is a dict; if it is not present at all # in config, then the default is used ({'mode': 'auto', 'devices': ['/']}) # # mode: # values: # * auto: use any option possible (any available) # if none are available, do not warn, only log at debug level. # * growpart: use growpart to grow partitions # if growpart is not available, this is an error. # * off, false # # devices: # a list of things to resize. # items can be filesystem paths or devices (in /dev) # examples: # devices: [/, /dev/vdb1] # # ignore_growroot_disabled: # a boolean, default is false. # if the file /etc/growroot-disabled exists, then cloud-init will not grow # the root partition. This is to allow a single file to disable both # cloud-initramfs-growroot and cloud-init's growroot support. # # true indicates that /etc/growroot-disabled should be ignored # growpart: mode: auto devices: ['/'] ignore_growroot_disabled: false examples/cloud-config-landscape.txt000064400000001413147221477530013437 0ustar00# Landscape-client configuration # # Anything under the top 'landscape: client' entry # will be basically rendered into a ConfigObj formatted file # under the '[client]' section of /etc/landscape/client.conf # # Note: 'tags' should be specified as a comma delimited string # rather than a list. # # You can get example key/values by running 'landscape-config', # answer the questions, then look at /etc/landscape/client.config landscape: client: url: "https://landscape.canonical.com/message-system" ping_url: "http://landscape.canonical.com/ping" data_path: "/var/lib/landscape/client" http_proxy: "http://my.proxy.com/foobar" tags: "server,cloud" computer_title: footitle https_proxy: fooproxy registration_key: fookey account_name: fooaccount examples/cloud-config-power-state.txt000064400000003123147221477530013757 0ustar00#cloud-config ## poweroff or reboot system after finished # default: none # # power_state can be used to make the system shutdown, reboot or # halt after boot is finished. This same thing can be achieved by # user-data scripts or by runcmd by simply invoking 'shutdown'. # # Doing it this way ensures that cloud-init is entirely finished with # modules that would be executed, and avoids any error/log messages # that may go to the console as a result of system services like # syslog being taken down while cloud-init is running.
# # If you delay '+5' (5 minutes) and have a timeout of # 120 (2 minutes), then the max time until shutdown will be 7 minutes. # cloud-init will invoke 'shutdown +5' after the process finishes, or # when 'timeout' seconds have elapsed. # # delay: form accepted by shutdown. default is 'now'. other format # accepted is +m (m in minutes) # mode: required. must be one of 'poweroff', 'halt', 'reboot' # message: provided as the message argument to 'shutdown'. default is none. # timeout: the amount of time to give the cloud-init process to finish # before executing shutdown. # condition: apply state change only if condition is met. # May be boolean True (always met), or False (never met), # or a command string or list to be executed. # command's exit code indicates: # 0: condition met # 1: condition not met # other exit codes will result in 'not met', but are reserved # for future use. # power_state: delay: "+30" mode: poweroff message: Bye Bye timeout: 30 condition: True examples/cloud-config-archive-launch-index.txt000064400000001442147221477530015505 0ustar00#cloud-config-archive # This is an example of a cloud archive # format which includes a set of launch indexes # that will be filtered on (thus only showing # up in instances with that launch index), this # is done by adding the 'launch-index' key which # maps to the integer 'launch-index' that the # corresponding content should be used with. # # It is possible to leave this value out which # will mean that the content will be applicable # for all instances - type: foo/wark filename: bar content: | This is my payload hello launch-index: 1 # I will only be used on launch-index 1 - this is also payload - | multi line payload here - type: text/upstart-job filename: my-upstart.conf content: | whats this, yo? launch-index: 0 # I will only be used on launch-index 0 examples/seed/README000064400000001264147221477530010201 0ustar00This directory is an example of a 'seed' directory. copying these files inside an instance's /var/lib/cloud/seed/nocloud or /var/lib/cloud/seed/nocloud-net will cause the 'DataSourceNoCloud' and 'DataSourceNoCloudNet' modules to enable and read the given data. The directory must have both files. - user-data: This is the user data, as would be consumed from ec2's metadata service see examples in doc/examples. - meta-data: This file is yaml formated data similar to what is in the ec2 metadata service under meta-data/. 
examples/cloud-config-archive-launch-index.txt

#cloud-config-archive
# This is an example of the cloud archive format, which includes a set of
# launch indexes that will be filtered on (thus only showing up in
# instances with that launch index).  This is done by adding the
# 'launch-index' key, which maps to the integer launch index that the
# corresponding content should be used with.
#
# It is possible to leave this value out, which means that the content
# will be applicable for all instances
- type: foo/wark
  filename: bar
  content: |
    This is my payload
    hello
  launch-index: 1  # I will only be used on launch-index 1
- this is also payload
- |
  multi line payload
  here
- type: text/upstart-job
  filename: my-upstart.conf
  content: |
    whats this, yo?
  launch-index: 0  # I will only be used on launch-index 0

examples/seed/README

This directory is an example of a 'seed' directory.

Copying these files inside an instance's
  /var/lib/cloud/seed/nocloud
or
  /var/lib/cloud/seed/nocloud-net
will cause the 'DataSourceNoCloud' and 'DataSourceNoCloudNet' modules
to enable and read the given data.

The directory must have both files.

- user-data:
  This is the user data, as would be consumed from ec2's metadata service.
  See examples in doc/examples.
- meta-data:
  This file is yaml formatted data similar to what is in the ec2 metadata
  service under meta-data/.  See the example, or, on an ec2 instance, run:
    python -c 'import boto.utils, yaml; print(
      yaml.dump(boto.utils.get_instance_metadata()))'

examples/seed/meta-data

# this is yaml formatted data
# it is expected to be roughly what you would get from running the following
# on an ec2 instance:
#   python -c 'import boto.utils, yaml; print(yaml.dump(boto.utils.get_instance_metadata()))'
ami-id: ami-fd4aa494
ami-launch-index: '0'
ami-manifest-path: ubuntu-images-us/ubuntu-lucid-10.04-amd64-server-20100427.1.manifest.xml
block-device-mapping: {ami: sda1, ephemeral0: sdb, ephemeral1: sdc, root: /dev/sda1}
hostname: domU-12-31-38-07-19-44.compute-1.internal
instance-action: none
instance-id: i-87018aed
instance-type: m1.large
kernel-id: aki-c8b258a1
local-hostname: domU-12-31-38-07-19-44.compute-1.internal
local-ipv4: 10.223.26.178
placement: {availability-zone: us-east-1d}
public-hostname: ec2-184-72-174-120.compute-1.amazonaws.com
public-ipv4: 184.72.174.120
public-keys:
  ec2-keypair.us-east-1: [ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCD9dlT00vOUC8Ttq6YH8RzUCVqPQl6HaSfWSTKYnZiVCpTBj1CaRZPLRLmkSB9Nziy4aRJa/LZMbBHXytQKnB1psvNknqC2UNlrXXMk+Vx5S4vg21MXYYimK4uZEY0Qz29QUiTyNsx18jpAaF4ocUpTpRhxPEBCcSCDmMbc27MU2XuTbasM2NjW/w0bBF3ZFhdH68dZICXdTxS2jUrtrCnc1D/QXVZ5kQO3jsmSyJg8E0nE+6Onpx2YRoVRSwjpGzVZ+BlXPnN5xBREBG8XxzhNFHJbek+RgK5TfL+k4yD4XhnVZuZu53cBAFhj+xPKhtisSd+YmaEq+Jt9uS0Ekd5 ec2-keypair.us-east-1, '']
reservation-id: r-e2225889
security-groups: default

# of the fields above:
#   required:
#     instance-id
#   suggested:
#     local-hostname
#     public-keys

examples/seed/user-data

#cloud-config
runcmd:
 - [ sh, -c, 'echo ==== $(date) ====; echo HI WORLD; echo =======' ]

examples/cloud-config-run-cmds.txt

#cloud-config

# run commands
# default: none
# runcmd contains a list of either lists or a string
# each item will be executed in order, at a level similar to rc.local, with
# output to the console
# - runcmd only runs during the first boot
# - if the item is a list, the items will be properly executed as if
#   passed to execve(3) (with the first arg as the command).
# - if the item is a string, it will be written to a file and
#   interpreted by 'sh'
#
# Note that the list has to be proper yaml, so you have to quote
# any characters yaml would eat (':' can be problematic)
runcmd:
 - [ ls, -l, / ]
 - [ sh, -xc, "echo $(date) ': hello world!'" ]
 - [ sh, -c, echo "=========hello world'=========" ]
 - ls -l /root
 # Note: Don't write files to /tmp from cloud-init; use /run/somedir instead.
 # Early boot environments can race systemd-tmpfiles-clean (LP: #1707222).
 - mkdir /run/mydir
 - [ wget, "http://slashdot.org", -O, /run/mydir/index.html ]
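# A sketch (not part of the original example) of the quoting note above:
# unquoted, the colon in "echo hello: world" would be parsed as a yaml
# mapping, so a string item containing ':' must be quoted whole.
# '/run/mydir' reuses the directory created in the list above.
#
# runcmd:
#  - "echo 'hello: world' > /run/mydir/greeting"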
examples/cloud-config-gluster.txt

#cloud-config
# vim: syntax=yaml

# Mounts volfile exported by glusterfsd running on
# "volfile-server-hostname" onto the local mount point '/mnt/data'
#
# In reality, replace 'volfile-server-hostname' with one of your nodes
# running glusterfsd.
#
packages:
 - glusterfs-client

mounts:
 - [ 'volfile-server-hostname:6996', /mnt/data, glusterfs, "defaults,nofail", "0", "2" ]

runcmd:
 - [ modprobe, fuse ]
 - [ mkdir, '-p', /mnt/data ]
 - [ mount, '-a' ]

examples/cloud-config-seed-random.txt

#cloud-config
#
# random_seed is a dictionary.
#
# The config module will write seed data from the datasource
# to 'file' described below.
#
# Entries in this dictionary are:
#   file: the file to write random data to (default is /dev/urandom)
#   data: this data will be written to 'file' before data from
#         the datasource
#   encoding: this will be used to decode 'data' provided.
#             allowed values are 'raw', 'base64', 'b64',
#             'gzip', or 'gz'.  Default is 'raw'
#
#   command: execute this command to seed random.
#            the command will have RANDOM_SEED_FILE in its environment
#            set to the value of 'file' above.
#   command_required: default False
#            if true, and 'command' is not available to be run,
#            then an exception is raised and cloud-init will record failure.
#            Otherwise, only a debug message is logged.
#
# Note: command could be ['pollinate',
#                         '--server=http://local.pollinate.server']
# which would have pollinate populate /dev/urandom from provided server
random_seed:
  file: '/dev/urandom'
  data: 'my random string'
  encoding: 'raw'
  command: ['sh', '-c', 'dd if=/dev/urandom of=$RANDOM_SEED_FILE']
  command_required: True
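# A sketch (not part of the original example) of base64-encoded seed data,
# using the 'b64' encoding described above; 'aGVsbG8gcmFuZG9tCg==' is
# base64 for 'hello random' plus a trailing newline.
#
# random_seed:
#   file: '/dev/urandom'
#   data: 'aGVsbG8gcmFuZG9tCg=='
#   encoding: 'b64'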
examples/cloud-config-lxd.txt

#cloud-config

# configure lxd
# default: none
# all options default to none if not specified
# lxd: config sections for lxd
#   init: dict of options for lxd init, see 'man lxd'
#     network_address: address for lxd to listen on
#     network_port: port for lxd to listen on
#     storage_backend: either 'zfs' or 'dir'
#     storage_create_device: device based storage using specified device
#     storage_create_loop: set up loop based storage with size in GB
#     storage_pool: name of storage pool to use or create
#     trust_password: password required to add new clients
#   bridge: dict of options for the lxd bridge
#     mode: one of "new", "existing" or "none". Defaults to "new"
#     name: the name of the bridge. Defaults to "lxdbr0"
#     ipv4_address: an IPv4 address (e.g. 10.0.8.1)
#     ipv4_netmask: a CIDR mask value (e.g. 24)
#     ipv4_dhcp_first: the first IP of the DHCP range (e.g. 10.0.8.2)
#     ipv4_dhcp_last: the last IP of the DHCP range (e.g. 10.0.8.254)
#     ipv4_dhcp_leases: the size of the DHCP pool (e.g. 250)
#     ipv4_nat: either "true" or "false"
#     ipv6_address: an IPv6 address (e.g. fd98:9e0:3744::1)
#     ipv6_netmask: a CIDR mask value (e.g. 64)
#     ipv6_nat: either "true" or "false"
#     domain: domain name to use for the bridge

lxd:
  init:
    network_address: 0.0.0.0
    network_port: 8443
    storage_backend: zfs
    storage_pool: datapool
    storage_create_loop: 10
  bridge:
    mode: new
    name: lxdbr0
    ipv4_address: 10.0.8.1
    ipv4_netmask: 24
    ipv4_dhcp_first: 10.0.8.2
    ipv4_dhcp_last: 10.0.8.3
    ipv4_dhcp_leases: 250
    ipv4_nat: true
    ipv6_address: fd98:9e0:3744::1
    ipv6_netmask: 64
    ipv6_nat: true
    domain: lxd

# The simplest working configuration is
# lxd:
#   init:
#     storage_backend: dir

examples/cloud-config-ntp.txt

#cloud-config

# ntp: configure ntp services
#   servers: List of NTP servers with which to sync
#   pools: List of NTP pool servers with which to sync (pools are typically
#          DNS hostnames which resolve to different specific servers to load
#          balance a set of services)
#
# Each server in the list will be added in list-order in the following format:
#
#   [pool|server] <server entry> iburst
#
# If no servers or pools are defined but ntp is enabled, then cloud-init will
# render the distro default list of pools
#
#   pools = [
#     '0.{distro}.pool.ntp.org',
#     '1.{distro}.pool.ntp.org',
#     '2.{distro}.pool.ntp.org',
#     '3.{distro}.pool.ntp.org',
#   ]
#
ntp:
  pools: ['0.company.pool.ntp.org', '1.company.pool.ntp.org', 'ntp.myorg.org']
  servers: ['my.ntp.server.local', 'ntp.ubuntu.com', '192.168.23.2']
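# As a sketch (not part of the original example), applying the per-entry
# format above to this config would yield ntp configuration lines roughly
# like the following; the exact file and template are distro-dependent:
#
#   pool 0.company.pool.ntp.org iburst
#   pool 1.company.pool.ntp.org iburst
#   pool ntp.myorg.org iburst
#   server my.ntp.server.local iburst
#   server ntp.ubuntu.com iburst
#   server 192.168.23.2 iburst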
examples/cloud-config-puppet.txt

#cloud-config
#
# This is an example file to automatically setup and run puppetd
# when the instance boots for the first time.
# Make sure that this file is valid yaml before starting instances.
# It should be passed as user-data when starting the instance.
puppet:
  # Every key present in the conf object will be added to puppet.conf:
  # [name]
  # subkey=value
  #
  # For example the configuration below will have the following section
  # added to puppet.conf:
  # [puppetd]
  # server=puppetmaster.example.org
  # certname=i-0123456.ip-X-Y-Z.cloud.internal
  #
  # The puppetmaster ca certificate will be available in
  # /var/lib/puppet/ssl/certs/ca.pem
  conf:
    agent:
      server: "puppetmaster.example.org"
      # certname supports substitutions at runtime:
      #   %i: instanceid
      #       Example: i-0123456
      #   %f: fqdn of the machine
      #       Example: ip-X-Y-Z.cloud.internal
      #
      # NB: the certname will automatically be lowercased as required by puppet
      certname: "%i.%f"
    # ca_cert is a special case. It won't be added to puppet.conf.
    # It holds the puppetmaster certificate in pem format.
    # It should be a multi-line string (using the | yaml notation for
    # multi-line strings).
    # The puppetmaster certificate is located in
    # /var/lib/puppet/ssl/ca/ca_crt.pem on the puppetmaster host.
    #
    ca_cert: |
      -----BEGIN CERTIFICATE-----
      MIICCTCCAXKgAwIBAgIBATANBgkqhkiG9w0BAQUFADANMQswCQYDVQQDDAJjYTAe
      Fw0xMDAyMTUxNzI5MjFaFw0xNTAyMTQxNzI5MjFaMA0xCzAJBgNVBAMMAmNhMIGf
      MA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCu7Q40sm47/E1Pf+r8AYb/V/FWGPgc
      b014OmNoX7dgCxTDvps/h8Vw555PdAFsW5+QhsGr31IJNI3kSYprFQcYf7A8tNWu
      1MASW2CfaEiOEi9F1R3R4Qlz4ix+iNoHiUDTjazw/tZwEdxaQXQVLwgTGRwVa+aA
      qbutJKi93MILLwIDAQABo3kwdzA4BglghkgBhvhCAQ0EKxYpUHVwcGV0IFJ1Ynkv
      T3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNhdGUwDwYDVR0TAQH/BAUwAwEB/zAd
      BgNVHQ4EFgQUu4+jHB+GYE5Vxo+ol1OAhevspjAwCwYDVR0PBAQDAgEGMA0GCSqG
      SIb3DQEBBQUAA4GBAH/rxlUIjwNb3n7TXJcDJ6MMHUlwjr03BDJXKb34Ulndkpaf
      +GAlzPXWa7bO908M9I8RnPfvtKnteLbvgTK+h+zX1XCty+S2EQWk29i2AdoqOTxb
      hppiGMp0tT5Havu4aceCXiy2crVcudj3NFciy8X66SoECemW9UYDCb9T5D0d
      -----END CERTIFICATE-----

examples/cloud-config-write-files.txt

#cloud-config
# vim: syntax=yaml
#
# This is the configuration syntax that the write_files module
# will know how to understand.  Encoding can be given as b64 or gzip or
# (gz+b64).  The content will be decoded accordingly and then written to
# the path that is provided.
#
# Note: Content strings here are truncated for example purposes.
write_files:
- encoding: b64
  content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4...
  owner: root:root
  path: /etc/sysconfig/selinux
  permissions: '0644'
- content: |
    # My new /etc/sysconfig/samba file
    SMBDOPTIONS="-D"
  path: /etc/sysconfig/samba
- content: !!binary |
    f0VMRgIBAQAAAAAAAAAAAAIAPgABAAAAwARAAAAAAABAAAAAAAAAAJAVAAAAAAAAAAAAAEAAOAAI
    AEAAHgAdAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgA
    AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA
    ....
  path: /bin/arch
  permissions: '0555'
- encoding: gzip
  content: !!binary |
    H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA=
  path: /usr/bin/hello
  permissions: '0755'
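# A sketch (not part of the original examples) of the remaining encoding,
# gz+b64: the content is gzipped first, then base64-encoded.  The path is
# hypothetical and the content string is truncated like the examples above.
#
# write_files:
# - encoding: gz+b64
#   content: H4sIAIDb/U8C/...
#   path: /usr/bin/hello2
#   permissions: '0755'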
examples/cloud-config-disk-setup.txt

#cloud-config
# Cloud-init supports the creation of simple partition tables and file systems
# on devices.

# Default disk definitions for AWS
# --------------------------------
# (Not implemented yet, but provided for future documentation)

disk_setup:
  ephemeral0:
    table_type: 'mbr'
    layout: True
    overwrite: False

fs_setup:
  - label: None
    filesystem: ext3
    device: ephemeral0
    partition: auto

# Default disk definitions for Microsoft Azure
# ------------------------------------------

device_aliases: {'ephemeral0': '/dev/sdb'}
disk_setup:
  ephemeral0:
    table_type: mbr
    layout: True
    overwrite: False

fs_setup:
  - label: ephemeral0
    filesystem: ext4
    device: ephemeral0.1
    replace_fs: ntfs

# Data disk definitions for Microsoft Azure
# ------------------------------------------

disk_setup:
  /dev/disk/azure/scsi1/lun0:
    table_type: gpt
    layout: True
    overwrite: True

fs_setup:
  - device: /dev/disk/azure/scsi1/lun0
    partition: 1
    filesystem: ext4

# Default disk definitions for SmartOS
# ------------------------------------

device_aliases: {'ephemeral0': '/dev/vdb'}
disk_setup:
  ephemeral0:
    table_type: mbr
    layout: False
    overwrite: False

fs_setup:
  - label: ephemeral0
    filesystem: ext4
    device: ephemeral0.0

# Caveat for SmartOS: if the ephemeral disk is not defined, then the disk will
# not be automatically added to the mounts.

# The default definition is used to make sure that the ephemeral storage is
# set up properly.

# "disk_setup": disk partitioning
# --------------------------------

# The disk_setup directive instructs Cloud-init to partition a disk.  The
# format is:

disk_setup:
  ephemeral0:
    table_type: 'mbr'
    layout: 'auto'
  /dev/xvdh:
    table_type: 'mbr'
    layout:
      - 33
      - [33, 82]
      - 33
    overwrite: True

# The format is a list of dicts of dicts.  The first value is the name of the
# device and the subsequent values define how to create and layout the
# partition.
# The general format is:
#   disk_setup:
#     <DEVICE>:
#       table_type: 'mbr'
#       layout: <LAYOUT|BOOL>
#       overwrite: <BOOL>
#
# Where:
#   <DEVICE>: The name of the device. 'ephemeralX' and 'swap' are special
#             values which are specific to the cloud. For these devices
#             Cloud-init will look up what the real device is and then
#             use it.
#
#             For other devices, the kernel device name is used. At this
#             time only simple kernel devices are supported, meaning
#             that device mapper and other targets may not work.
#
#             Note: At this time, there is no handling or setup of
#             device mapper targets.
#
#   table_type=<TYPE>: Currently the following are supported:
#             'mbr': default; sets up an MS-DOS partition table
#             'gpt': sets up a GPT partition table
#
#             Note: At this time only 'mbr' and 'gpt' partition tables
#             are allowed. It is anticipated in the future that
#             we'll also have "RAID" to create a mdadm RAID.
#
#   layout={...}: The device layout. This is a list of values, with the
#             percentage of disk that the partition will take.
#             Valid options are:
#               [<SIZE>, [<SIZE>, <PART_TYPE>]]
#
#             Where <SIZE> is the _percentage_ of the disk to use, while
#             <PART_TYPE> is the numerical value of the partition type.
#
#             The following sets up two partitions, with the first
#             partition having a swap label, taking 1/3 of the disk space
#             and the remainder being used as the second partition.
#               /dev/xvdh:
#                 table_type: 'mbr'
#                 layout:
#                   - [33,82]
#                   - 66
#                 overwrite: True
#
#             When layout is "true" it means single-partition the entire
#             device.
#
#             When layout is "false" it means don't partition or ignore
#             existing partitioning.
#
#             If layout is set to "true" and overwrite is set to "false",
#             it will skip partitioning the device without a failure.
#
#   overwrite=<BOOL>: This describes whether to ride with safeties on and
#             everything holstered.
#
#             'false' is the default, which means that:
#               1. The device will be checked for a partition table
#               2. The device will be checked for a file system
#               3. If either a partition table or file system is found, then
#                  the operation will be _skipped_.
#
#             'true' is cowboy mode. There are no checks and things are
#             done blindly. USE with caution, you can do things you
#             really, really don't want to do.
#
#
# fs_setup: Setup the file system
# -------------------------------
#
# fs_setup describes how the file systems are supposed to look.

fs_setup:
  - label: ephemeral0
    filesystem: 'ext3'
    device: 'ephemeral0'
    partition: 'auto'
  - label: mylabl2
    filesystem: 'ext4'
    device: '/dev/xvda1'
  - cmd: mkfs -t %(filesystem)s -L %(label)s %(device)s
    label: mylabl3
    filesystem: 'btrfs'
    device: '/dev/xvdh'

# The general format is:
#   fs_setup:
#     - label: