author    Flavio Castelli <fcastelli@suse.com>  2016-06-27 14:41:46 +0200
committer Flavio Castelli <fcastelli@suse.com>  2016-06-27 15:29:42 +0200
commit    db065ff077164db6fedefeb17ad6b2dafa7d54ad (patch)
tree      0af1bfb2ab11a3400104c584a8711e466b062232 /libvirt
parent    af5f25ca9f8d903cf7f365e5284bdc4743fa965c (diff)
Associate a cloud-init ISO with a domain
Allow a cloud-init ISO to be attached to a libvirt domain.

Signed-off-by: Flavio Castelli <fcastelli@suse.com>
Diffstat (limited to 'libvirt')
-rw-r--r--  libvirt/cloudinit_def.go            304
-rw-r--r--  libvirt/disk_def.go                  13
-rw-r--r--  libvirt/provider.go                   5
-rw-r--r--  libvirt/resource_cloud_init_iso.go   99
-rw-r--r--  libvirt/resource_libvirt_domain.go   62
5 files changed, 480 insertions(+), 3 deletions(-)
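
For orientation, the sketch below is not part of the commit: it shows how the new pieces fit together, mirroring what resourceCloudInitISOCreate and resourceLibvirtDomainCreate do. A defCloudInit is built, uploaded as an ISO volume with CreateAndUpload, and the returned volume key is turned into a CDROM disk definition with newDiskForCloudInit. The helper name, pool name, hostname and SSH key are illustrative placeholders.

package libvirt

import (
	"log"

	libvirt "github.com/dmacvicar/libvirt-go"
)

// exampleCloudInitAttach is a hypothetical helper, not part of this commit.
// It assumes an established libvirt connection and a storage pool named "default".
func exampleCloudInitAttach(virConn *libvirt.VirConnection) (defDisk, error) {
	ci := newCloudInitDef()
	ci.Name = "commoninit.iso"          // volume name inside the pool
	ci.PoolName = "default"
	ci.Metadata.LocalHostname = "node1" // written to the meta-data file
	ci.UserData.SSHAuthorizedKeys = append(ci.UserData.SSHAuthorizedKeys,
		"ssh-rsa AAAA... user@host") // written to user-data under #cloud-config

	// genisoimage builds the ISO locally; CreateAndUpload uploads it as a raw
	// volume and returns the volume key, which is the value the domain's
	// cloud_init attribute stores.
	key, err := ci.CreateAndUpload(virConn)
	if err != nil {
		return defDisk{}, err
	}
	log.Printf("cloud-init volume key: %s", key)

	// Resolve the key back to pool/volume names and describe it as an IDE
	// CDROM, ready to be appended to the domain's disk list.
	return newDiskForCloudInit(virConn, key)
}
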
diff --git a/libvirt/cloudinit_def.go b/libvirt/cloudinit_def.go
new file mode 100644
index 00000000..a0572b5f
--- /dev/null
+++ b/libvirt/cloudinit_def.go
@@ -0,0 +1,304 @@
+package libvirt
+
+import (
+ "encoding/xml"
+ "fmt"
+ libvirt "github.com/dmacvicar/libvirt-go"
+ "github.com/hooklift/iso9660"
+ "gopkg.in/yaml.v2"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "time"
+)
+
+// names of the files expected by cloud-init
+const USERDATA string = "user-data"
+const METADATA string = "meta-data"
+
+type defCloudInit struct {
+ Name string
+ PoolName string
+ Metadata struct {
+ LocalHostname string `yaml:"local-hostname"`
+ InstanceID string `yaml:"instance-id"`
+ }
+ UserData struct {
+ SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
+ }
+}
+
+// newCloudInitDef returns a new defCloudInit initialized with the defaults
+// used by the provider
+func newCloudInitDef() defCloudInit {
+ ci := defCloudInit{}
+ ci.Metadata.InstanceID = fmt.Sprintf("created-at-%s", time.Now().String())
+
+ return ci
+}
+
+// CreateAndUpload creates an ISO file from the contents of the defCloudInit
+// instance and uploads it to the libvirt pool.
+// Returns the key of the uploaded volume.
+func (ci *defCloudInit) CreateAndUpload(virConn *libvirt.VirConnection) (string, error) {
+ iso, err := ci.createISO()
+ if err != nil {
+ return "", err
+ }
+
+ pool, err := virConn.LookupStoragePoolByName(ci.PoolName)
+ if err != nil {
+ return "", fmt.Errorf("can't find storage pool '%s'", ci.PoolName)
+ }
+ defer pool.Free()
+
+ // Refresh the pool so that libvirt has an up-to-date view of the volumes
+ // it contains before we create the new one.
+ WaitForSuccess("Error refreshing pool for volume", func() error {
+ return pool.Refresh(0)
+ })
+
+ volumeDef := newDefVolume()
+ volumeDef.Name = ci.Name
+
+ // an existing image was given, which means we cannot choose the size ourselves
+ img, err := newImage(iso)
+ if err != nil {
+ return "", err
+ }
+ defer func() {
+ // Remove the tmp directory holding the ISO
+ if err = os.RemoveAll(filepath.Dir(iso)); err != nil {
+ log.Printf("Error while removing tmp directory holding the ISO file: %s", err)
+ }
+ }()
+
+ size, err := img.Size()
+ if err != nil {
+ return "", err
+ }
+
+ volumeDef.Capacity.Unit = "B"
+ volumeDef.Capacity.Amount = size
+
+ volumeDefXml, err := xml.Marshal(volumeDef)
+ if err != nil {
+ return "", fmt.Errorf("Error serializing libvirt volume: %s", err)
+ }
+
+ // create the volume
+ volume, err := pool.StorageVolCreateXML(string(volumeDefXml), 0)
+ if err != nil {
+ return "", fmt.Errorf("Error creating libvirt volume: %s", err)
+ }
+ defer volume.Free()
+
+ // upload ISO file
+ stream, err := libvirt.NewVirStream(virConn, 0)
+ if err != nil {
+ return "", err
+ }
+ defer stream.Close()
+
+ volume.Upload(stream, 0, uint64(volumeDef.Capacity.Amount), 0)
+ err = img.WriteToStream(stream)
+ if err != nil {
+ return "", err
+ }
+
+ key, err := volume.GetKey()
+ if err != nil {
+ return "", fmt.Errorf("Error retrieving volume key: %s", err)
+ }
+
+ return key, nil
+}
+
+// Create the ISO holding all the cloud-init data
+// Returns a string with the full path to the ISO file
+func (ci *defCloudInit) createISO() (string, error) {
+ log.Print("Creating new ISO")
+ tmpDir, err := ci.createFiles()
+ if err != nil {
+ return "", err
+ }
+
+ isoDestination := filepath.Join(tmpDir, ci.Name)
+ cmd := exec.Command(
+ "genisoimage",
+ "-output",
+ isoDestination,
+ "-volid",
+ "cidata",
+ "-joliet",
+ "-rock",
+ filepath.Join(tmpDir, USERDATA),
+ filepath.Join(tmpDir, METADATA))
+
+ log.Print("About to execute cmd: %+v", cmd)
+ if err = cmd.Start(); err != nil {
+ return "", fmt.Errorf("Error while starting the creation of CloudInit's ISO image: %s", err)
+ }
+ if err = cmd.Wait(); err != nil {
+ return "", fmt.Errorf("Error while creating CloudInit's ISO image: %s", err)
+ }
+ log.Print("ISO created at %s", isoDestination)
+
+ return isoDestination, nil
+}
+
+// Dumps the user-data and the meta-data into two dedicated YAML files.
+// The files are created inside a temporary directory.
+// Returns the path of the temporary directory, or an error if the directory
+// or the files could not be created.
+func (ci *defCloudInit) createFiles() (string, error) {
+ log.Print("Creating ISO contents")
+ tmpDir, err := ioutil.TempDir("", "cloudinit")
+ if err != nil {
+ return "", fmt.Errorf("Cannot create tmp directory for cloudinit ISO generation: %s",
+ err)
+ }
+
+ // Create files required by ISO file
+ ud, err := yaml.Marshal(&ci.UserData)
+ if err != nil {
+ return "", fmt.Errorf("Error dumping cloudinit's user data: %s", err)
+ }
+ userdata := fmt.Sprintf("#cloud-config\n%s", string(ud))
+ if err = ioutil.WriteFile(
+ filepath.Join(tmpDir, USERDATA),
+ []byte(userdata),
+ os.ModePerm); err != nil {
+ return "", fmt.Errorf("Error while writing user-data to file: %s", err)
+ }
+
+ metadata, err := yaml.Marshal(&ci.Metadata)
+ if err != nil {
+ return "", fmt.Errorf("Error dumping cloudinit's meta data: %s", err)
+ }
+ if err = ioutil.WriteFile(filepath.Join(tmpDir, METADATA), metadata, os.ModePerm); err != nil {
+ return "", fmt.Errorf("Error while writing meta-data to file: %s", err)
+ }
+
+ log.Print("ISO contents created")
+
+ return tmpDir, nil
+}
+
+func newCloudInitDefFromRemoteISO(virConn *libvirt.VirConnection, key string) (defCloudInit, error) {
+ ci := defCloudInit{}
+
+ volume, err := virConn.LookupStorageVolByKey(key)
+ if err != nil {
+ return ci, fmt.Errorf("Can't retrieve volume %s", key)
+ }
+ defer volume.Free()
+
+ ci.Name, err = volume.GetName()
+ if err != nil {
+ return ci, fmt.Errorf("Error retrieving volume name: %s", err)
+ }
+
+ volPool, err := volume.LookupPoolByVolume()
+ if err != nil {
+ return ci, fmt.Errorf("Error retrieving pool for volume: %s", err)
+ }
+ defer volPool.Free()
+
+ ci.PoolName, err = volPool.GetName()
+ if err != nil {
+ return ci, fmt.Errorf("Error retrieving pool name: %s", err)
+ }
+
+ file, err := downloadISO(virConn, volume)
+ if file != nil {
+ defer os.Remove(file.Name())
+ defer file.Close()
+ }
+ if err != nil {
+ return ci, err
+ }
+
+ // read ISO contents
+ isoReader, err := iso9660.NewReader(file)
+ if err != nil {
+ return ci, fmt.Errorf("Error initializing ISO reader: %s", err)
+ }
+
+ for {
+ f, err := isoReader.Next()
+ if err == io.EOF {
+ break
+ }
+
+ if err != nil {
+ return ci, err
+ }
+
+ log.Printf("ISO reader: processing file %s", f.Name())
+
+ // TODO: the iso9660 reader exposes the short ISO9660 name instead of "user-data"
+ if f.Name() == "/user_dat." {
+ data, err := ioutil.ReadAll(f.Sys().(io.Reader))
+ if err != nil {
+ return ci, fmt.Errorf("Error while reading %s: %s", USERDATA, err)
+ }
+ if err := yaml.Unmarshal(data, &ci.UserData); err != nil {
+ return ci, fmt.Errorf("Error while unmarshalling user-data: %s", err)
+ }
+ }
+
+ // TODO: the iso9660 reader exposes the short ISO9660 name instead of "meta-data"
+ if f.Name() == "/meta_dat." {
+ data, err := ioutil.ReadAll(f.Sys().(io.Reader))
+ if err != nil {
+ return ci, fmt.Errorf("Error while reading %s: %s", METADATA, err)
+ }
+ if err := yaml.Unmarshal(data, &ci.Metadata); err != nil {
+ return ci, fmt.Errorf("Error while unmarshalling user-data: %s", err)
+ }
+ }
+ }
+
+ log.Printf("Read cloud-init from file: %+v", ci)
+
+ return ci, nil
+}
+
+// Downloads the content of the given cloud-init volume (the ISO) to a local
+// tmp file and returns it. Note well: the caller has to close and remove the
+// file when done.
+func downloadISO(virConn *libvirt.VirConnection, volume libvirt.VirStorageVol) (*os.File, error) {
+ // get Volume info (required to get size later)
+ info, err := volume.GetInfo()
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving info for volume: %s", err)
+ }
+
+ // create tmp file for the ISO
+ file, err := ioutil.TempFile("", "cloudinit")
+ if err != nil {
+ return nil, fmt.Errorf("Cannot create tmp file: %s", err)
+ }
+
+ // download ISO file
+ stream, err := libvirt.NewVirStream(virConn, 0)
+ if err != nil {
+ return file, err
+ }
+ defer stream.Close()
+
+ volume.Download(stream, 0, info.GetCapacityInBytes(), 0)
+
+ n, err := io.Copy(file, stream)
+ if err != nil {
+ return file, fmt.Errorf("Error while copying remote volume to local disk: %s", err)
+ }
+ file.Seek(0, 0)
+ log.Printf("%d bytes downloaded", n)
+
+ return file, nil
+}
diff --git a/libvirt/disk_def.go b/libvirt/disk_def.go
index 4c677c5d..28fc7159 100644
--- a/libvirt/disk_def.go
+++ b/libvirt/disk_def.go
@@ -48,3 +48,16 @@ func newDefDisk() defDisk {
return disk
}
+
+func newCDROM() defDisk {
+ disk := defDisk{}
+ disk.Type = "volume"
+ disk.Device = "cdrom"
+ disk.Target.Dev = "hda"
+ disk.Target.Bus = "ide"
+
+ disk.Driver.Name = "qemu"
+ disk.Driver.Type = "raw"
+
+ return disk
+}
diff --git a/libvirt/provider.go b/libvirt/provider.go
index 82dc1a39..e4a8d900 100644
--- a/libvirt/provider.go
+++ b/libvirt/provider.go
@@ -17,8 +17,9 @@ func Provider() terraform.ResourceProvider {
},
ResourcesMap: map[string]*schema.Resource{
- "libvirt_domain": resourceLibvirtDomain(),
- "libvirt_volume": resourceLibvirtVolume(),
+ "libvirt_domain": resourceLibvirtDomain(),
+ "libvirt_volume": resourceLibvirtVolume(),
+ "libvirt_cloud_init_iso": resourceCloudInitISO(),
},
ConfigureFunc: providerConfigure,
diff --git a/libvirt/resource_cloud_init_iso.go b/libvirt/resource_cloud_init_iso.go
new file mode 100644
index 00000000..40c0dc16
--- /dev/null
+++ b/libvirt/resource_cloud_init_iso.go
@@ -0,0 +1,99 @@
+package libvirt
+
+import (
+ "fmt"
+ "github.com/hashicorp/terraform/helper/schema"
+ "log"
+)
+
+func resourceCloudInitISO() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceCloudInitISOCreate,
+ Read: resourceCloudInitISORead,
+ Delete: resourceCloudInitISODelete,
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "pool": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Default: "default",
+ ForceNew: true,
+ },
+ "local_hostname": &schema.Schema{
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "ssh_authorized_key": &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ },
+ },
+ }
+}
+
+func resourceCloudInitISOCreate(d *schema.ResourceData, meta interface{}) error {
+ virConn := meta.(*Client).libvirt
+ if virConn == nil {
+ return fmt.Errorf("The libvirt connection was nil.")
+ }
+
+ cloudInit := newCloudInitDef()
+ cloudInit.Metadata.LocalHostname = d.Get("local_hostname").(string)
+
+ if _, ok := d.GetOk("ssh_authorized_key"); ok {
+ sshKey := d.Get("ssh_authorized_key").(string)
+ cloudInit.UserData.SSHAuthorizedKeys = append(
+ cloudInit.UserData.SSHAuthorizedKeys,
+ sshKey)
+ }
+
+ cloudInit.Name = d.Get("name").(string)
+ cloudInit.PoolName = d.Get("pool").(string)
+
+ log.Printf("[INFO] cloudInit: %+v", cloudInit)
+
+ key, err := cloudInit.CreateAndUpload(virConn)
+ if err != nil {
+ return err
+ }
+ d.SetId(key)
+
+ return resourceCloudInitISORead(d, meta)
+}
+
+func resourceCloudInitISORead(d *schema.ResourceData, meta interface{}) error {
+ virConn := meta.(*Client).libvirt
+ if virConn == nil {
+ return fmt.Errorf("The libvirt connection was nil.")
+ }
+
+ ci, err := newCloudInitDefFromRemoteISO(virConn, d.Id())
+ if err != nil {
+ return fmt.Errorf("Error while retrieving remote ISO: %s", err)
+ }
+
+ d.Set("pool", ci.PoolName)
+ d.Set("name", ci.Name)
+ d.Set("local_hostname", ci.Metadata.LocalHostname)
+
+ if len(ci.UserData.SSHAuthorizedKeys) == 1 {
+ d.Set("ssh_authorized_key", ci.UserData.SSHAuthorizedKeys[0])
+ }
+
+ return nil
+}
+
+func resourceCloudInitISODelete(d *schema.ResourceData, meta interface{}) error {
+ virConn := meta.(*Client).libvirt
+ if virConn == nil {
+ return fmt.Errorf("The libvirt connection was nil.")
+ }
+
+ return RemoveVolume(virConn, d.Id())
+}
diff --git a/libvirt/resource_libvirt_domain.go b/libvirt/resource_libvirt_domain.go
index 0d10a600..0d2cef52 100644
--- a/libvirt/resource_libvirt_domain.go
+++ b/libvirt/resource_libvirt_domain.go
@@ -45,6 +45,12 @@ func resourceLibvirtDomain() *schema.Resource {
Default: true,
ForceNew: false,
},
+ "cloud_init": &schema.Schema{
+ Type: schema.TypeString,
+ Required: false,
+ Optional: true,
+ ForceNew: false,
+ },
"disk": &schema.Schema{
Type: schema.TypeList,
Optional: true,
@@ -74,7 +80,7 @@ func resourceLibvirtDomainCreate(d *schema.ResourceData, meta interface{}) error
}
disksCount := d.Get("disk.#").(int)
- disks := make([]defDisk, 0, disksCount)
+ var disks []defDisk
for i := 0; i < disksCount; i++ {
prefix := fmt.Sprintf("disk.%d", i)
disk := newDefDisk()
@@ -104,6 +110,14 @@ func resourceLibvirtDomainCreate(d *schema.ResourceData, meta interface{}) error
disks = append(disks, disk)
}
+ if cloudinit, ok := d.GetOk("cloud_init"); ok {
+ disk, err := newDiskForCloudInit(virConn, cloudinit.(string))
+ if err != nil {
+ return err
+ }
+ disks = append(disks, disk)
+ }
+
netIfacesCount := d.Get("network_interface.#").(int)
netIfaces := make([]defNetworkInterface, 0, netIfacesCount)
for i := 0; i < netIfacesCount; i++ {
@@ -230,8 +244,28 @@ func resourceLibvirtDomainUpdate(d *schema.ResourceData, meta interface{}) error
}
}
+ if d.HasChange("cloud_init") {
+ cloudinit, err := newDiskForCloudInit(virConn, d.Get("cloud_init").(string))
+ if err != nil {
+ return err
+ }
+
+ data, err := xml.Marshal(cloudinit)
+ if err != nil {
+ return fmt.Errorf("Error serializing cloudinit disk: %s", err)
+ }
+
+ err = domain.UpdateDeviceFlags(
+ string(data),
+ libvirt.VIR_DOMAIN_AFFECT_CONFIG|libvirt.VIR_DOMAIN_AFFECT_CURRENT|libvirt.VIR_DOMAIN_AFFECT_LIVE)
+ if err != nil {
+ return fmt.Errorf("Error while changing the cloudinit volume: %s", err)
+ }
+ }
+
return nil
}
+
func resourceLibvirtDomainRead(d *schema.ResourceData, meta interface{}) error {
virConn := meta.(*Client).libvirt
if virConn == nil {
@@ -470,3 +504,29 @@ func isDomainRunning(domain libvirt.VirDomain) (bool, error) {
return state[0] == libvirt.VIR_DOMAIN_RUNNING, nil
}
+
+func newDiskForCloudInit(virConn *libvirt.VirConnection, volumeKey string) (defDisk, error) {
+ disk := newCDROM()
+
+ diskVolume, err := virConn.LookupStorageVolByKey(volumeKey)
+ if err != nil {
+ return disk, fmt.Errorf("Can't retrieve volume %s", volumeKey)
+ }
+ diskVolumeName, err := diskVolume.GetName()
+ if err != nil {
+ return disk, fmt.Errorf("Error retrieving volume name: %s", err)
+ }
+ diskPool, err := diskVolume.LookupPoolByVolume()
+ if err != nil {
+ return disk, fmt.Errorf("Error retrieving pool for volume: %s", err)
+ }
+ diskPoolName, err := diskPool.GetName()
+ if err != nil {
+ return disk, fmt.Errorf("Error retrieving pool name: %s", err)
+ }
+
+ disk.Source.Volume = diskVolumeName
+ disk.Source.Pool = diskPoolName
+
+ return disk, nil
+}