vagrant-libvirt-0.0.45/
vagrant-libvirt-0.0.45/.travis.yml
---
language: ruby
dist: trusty
before_install:
  - sudo apt-get update -qq
  - sudo apt-get install -y libvirt-dev
  - gem update --system
  - gem install bundler
install: bundle install
script: bundle exec rspec --color --format documentation
notifications:
  email: false
rvm:
  - 2.2.5
  - 2.3.3
env:
  global:
    - NOKOGIRI_USE_SYSTEM_LIBRARIES=true
  matrix:
    - VAGRANT_VERSION=v2.0.1
matrix:
  allow_failures:
    - env: VAGRANT_VERSION=master
      rvm: 2.3.3
vagrant-libvirt-0.0.45/vagrant-libvirt.gemspec
# -*- encoding: utf-8 -*-
# stub: vagrant-libvirt 0.0.42 ruby lib

Gem::Specification.new do |s|
  s.name = "vagrant-libvirt".freeze
  s.version = "0.0.45"

  s.required_rubygems_version = Gem::Requirement.new(">= 0".freeze) if s.respond_to? :required_rubygems_version=
  s.require_paths = ["lib".freeze]
  s.authors = ["Lukas Stanek".freeze, "Dima Vasilets".freeze, "Brian Pitts".freeze]
  s.files = `git ls-files`.split($\)
  s.test_files = s.files.grep(%r{^(test|spec|features)/})
  s.description = "libvirt provider for Vagrant.".freeze
  s.email = ["ls@elostech.cz".freeze, "pronix.service@gmail.com".freeze, "brian@polibyte.com".freeze]
  s.homepage = "https://github.com/vagrant-libvirt/vagrant-libvirt".freeze
  s.licenses = ["MIT".freeze]
  s.rubygems_version = "2.6.14".freeze
  s.summary = "libvirt provider for Vagrant.".freeze
  s.installed_by_version = "2.6.14" if s.respond_to? :installed_by_version

  if s.respond_to? :specification_version then
    s.specification_version = 4

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      s.add_development_dependency(%q.freeze, ["~> 3.5.0"])
      s.add_development_dependency(%q.freeze, ["~> 3.5.0"])
      s.add_development_dependency(%q.freeze, ["~> 3.5.0"])
      s.add_runtime_dependency(%q.freeze, [">= 0.3.0"])
      s.add_runtime_dependency(%q.freeze, [">= 1.6.0"])
      s.add_runtime_dependency(%q.freeze, ["~> 1.43.0"])
      s.add_development_dependency(%q.freeze, [">= 0"])
    else
      s.add_dependency(%q.freeze, ["~> 3.5.0"])
      s.add_dependency(%q.freeze, ["~> 3.5.0"])
      s.add_dependency(%q.freeze, ["~> 3.5.0"])
      s.add_dependency(%q.freeze, [">= 0.3.0"])
      s.add_dependency(%q.freeze, [">= 1.6.0"])
      s.add_dependency(%q.freeze, ["~> 1.43.0"])
      s.add_dependency(%q.freeze, [">= 0"])
    end
  else
    s.add_dependency(%q.freeze, ["~> 3.5.0"])
    s.add_dependency(%q.freeze, ["~> 3.5.0"])
    s.add_dependency(%q.freeze, ["~> 3.5.0"])
    s.add_dependency(%q.freeze, [">= 0.3.0"])
    s.add_dependency(%q.freeze, [">= 1.6.0"])
    s.add_dependency(%q.freeze, ["~> 1.43.0"])
    s.add_dependency(%q.freeze, [">= 0"])
  end
end
vagrant-libvirt-0.0.45/README.md
# Vagrant Libvirt Provider

[![Join the chat at https://gitter.im/vagrant-libvirt/vagrant-libvirt](https://badges.gitter.im/vagrant-libvirt/vagrant-libvirt.svg)](https://gitter.im/vagrant-libvirt/vagrant-libvirt?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/vagrant-libvirt/vagrant-libvirt.svg)](https://travis-ci.org/vagrant-libvirt/vagrant-libvirt)
[![Coverage Status](https://coveralls.io/repos/github/vagrant-libvirt/vagrant-libvirt/badge.svg?branch=master)](https://coveralls.io/github/vagrant-libvirt/vagrant-libvirt?branch=master)

This is a [Vagrant](http://www.vagrantup.com) plugin that adds a [Libvirt](http://libvirt.org)
provider to Vagrant, allowing Vagrant to control and provision machines via Libvirt toolkit. **Note:** Actual version is still a development one. Feedback is welcome and can help a lot :-) ## Index - [Features](#features) - [Future work](#future-work) - [Installation](#installation) - [Possible problems with plugin installation on Linux](#possible-problems-with-plugin-installation-on-linux) - [Vagrant Project Preparation](#vagrant-project-preparation) - [Add Box](#add-box) - [Create Vagrantfile](#create-vagrantfile) - [Start VM](#start-vm) - [How Project Is Created](#how-project-is-created) - [Libvirt Configuration](#libvirt-configuration) - [Provider Options](#provider-options) - [Domain Specific Options](#domain-specific-options) - [Reload behavior](#reload-behavior) - [Networks](#networks) - [Private Network Options](#private-network-options) - [Public Network Options](#public-network-options) - [Management Network](#management-network) - [Additional Disks](#additional-disks) - [Reload behavior](#reload-behavior-1) - [CDROMs](#cdroms) - [Input](#input) - [PCI device passthrough](#pci-device-passthrough) - [USB Controller Configuration](#usb-controller-configuration) - [USB Redirector Devices](#usb-redirector-devices) - [Random number generator passthrough](#random-number-generator-passthrough) - [Watchdog·Device](#watchdog-device) - [Smartcard device](#smartcard-device) - [Hypervisor Features](#hypervisor-features) - [CPU Features](#cpu-features) - [No box and PXE boot](#no-box-and-pxe-boot) - [SSH Access To VM](#ssh-access-to-vm) - [Forwarded Ports](#forwarded-ports) - [Synced Folders](#synced-folders) - [QEMU Session Support](#qemu-session-support) - [Customized Graphics](#customized-graphics) - [Box Format](#box-format) - [Create Box](#create-box) - [Development](#development) - [Contributing](#contributing) ## Features * Control local Libvirt hypervisors. * Vagrant `up`, `destroy`, `suspend`, `resume`, `halt`, `ssh`, `reload`, `package` and `provision` commands. * Upload box image (qcow2 format) to Libvirt storage pool. * Create volume as COW diff image for domains. * Create private networks. * Create and boot Libvirt domains. * SSH into domains. * Setup hostname and network interfaces. * Provision domains with any built-in Vagrant provisioner. * Synced folder support via `rsync`, `nfs` or `9p`. * Snapshots via [sahara](https://github.com/jedi4ever/sahara). * Package caching via [vagrant-cachier](http://fgrehm.viewdocs.io/vagrant-cachier/). * Use boxes from other Vagrant providers via [vagrant-mutate](https://github.com/sciurus/vagrant-mutate). * Support VMs with no box for PXE boot purposes (Vagrant 1.6 and up) ## Future work * Take a look at [open issues](https://github.com/vagrant-libvirt/vagrant-libvirt/issues?state=open). ## Installation First, you should have both qemu and libvirt installed if you plan to run VMs on your local system. For instructions, refer to your linux distribution's documentation. **NOTE:** Before you start using Vagrant-libvirt, please make sure your libvirt and qemu installation is working correctly and you are able to create qemu or kvm type virtual machines with `virsh` or `virt-manager`. Next, you must have [Vagrant installed](http://docs.vagrantup.com/v2/installation/index.html). Vagrant-libvirt supports Vagrant 1.5, 1.6, 1.7 and 1.8. *We only test with the upstream version!* If you decide to install your distros version and you run into problems, as a first step you should switch to upstream. 
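Before going further, it can save time to confirm that libvirt itself is healthy. The commands below are only a sanity check and assume the system connection (`qemu:///system`); depending on your distro you may need `sudo` or membership in the `libvirt`/`libvirtd` group:

```shell
# Check that libvirtd is reachable and report hypervisor versions
virsh -c qemu:///system version

# List defined domains (an empty list is fine; an error is not)
virsh -c qemu:///system list --all

# Verify hardware virtualization is exposed to qemu/kvm
ls -l /dev/kvm
```

If any of these fail, fix the libvirt/qemu installation before installing the plugin.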
Now you need to make sure your have all the build dependencies installed for vagrant-libvirt. This depends on your distro. An overview: * Ubuntu 12.04/14.04/16.04, Debian: ```shell apt-get build-dep vagrant ruby-libvirt apt-get install qemu libvirt-bin ebtables dnsmasq apt-get install libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev ``` (It is possible some users will already have libraries from the third line installed, but this is the way to make it work OOTB.) * CentOS 6, 7, Fedora 21: ```shell yum install qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm ``` * Fedora 22 and up: ```shell dnf -y install qemu libvirt libvirt-devel ruby-devel gcc ``` * Arch linux: please read the related [ArchWiki](https://wiki.archlinux.org/index.php/Vagrant#vagrant-libvirt) page. ```shell pacman -S vagrant ``` Now you're ready to install vagrant-libvirt using standard [Vagrant plugin](http://docs.vagrantup.com/v2/plugins/usage.html) installation methods. ```shell $ vagrant plugin install vagrant-libvirt ``` ### Possible problems with plugin installation on Linux In case of problems with building nokogiri and ruby-libvirt gem, install missing development libraries for libxslt, libxml2 and libvirt. On Ubuntu, Debian, make sure you are running all three of the `apt` commands above with `sudo`. On RedHat, Centos, Fedora, ... ```shell $ sudo dnf install libxslt-devel libxml2-devel libvirt-devel \ libguestfs-tools-c ruby-devel gcc ``` On Arch linux it is recommended to follow [steps from ArchWiki](https://wiki.archlinux.org/index.php/Vagrant#vagrant-libvirt). If have problem with installation - check your linker. It should be `ld.gold`: ```shell sudo alternatives --set ld /usr/bin/ld.gold # OR sudo ln -fs /usr/bin/ld.gold /usr/bin/ld ``` If you have issues building ruby-libvirt, try the following: ```shell CONFIGURE_ARGS='with-ldflags=-L/opt/vagrant/embedded/lib with-libvirt-include=/usr/include/libvirt with-libvirt-lib=/usr/lib' GEM_HOME=~/.vagrant.d/gems GEM_PATH=$GEM_HOME:/opt/vagrant/embedded/gems PATH=/opt/vagrant/embedded/bin:$PATH vagrant plugin install vagrant-libvirt ``` ## Vagrant Project Preparation ### Add Box After installing the plugin (instructions above), the quickest way to get started is to add Libvirt box and specify all the details manually within a `config.vm.provider` block. So first, add Libvirt box using any name you want. You can find more libvirt ready boxes at [Atlas](https://atlas.hashicorp.com/boxes/search?provider=libvirt). For example: ```shell vagrant init fedora/24-cloud-base ``` ### Create Vagrantfile And then make a Vagrantfile that looks like the following, filling in your information where necessary. For example: ```ruby Vagrant.configure("2") do |config| config.vm.define :test_vm do |test_vm| test_vm.vm.box = "fedora/24-cloud-base" end end ``` ### Start VM In prepared project directory, run following command: ```shell $ vagrant up --provider=libvirt ``` Vagrant needs to know that we want to use Libvirt and not default VirtualBox. That's why there is `--provider=libvirt` option specified. Other way to tell Vagrant to use Libvirt provider is to setup environment variable ```shell export VAGRANT_DEFAULT_PROVIDER=libvirt ``` ### How Project Is Created Vagrant goes through steps below when creating new project: 1. Connect to Libvirt localy or remotely via SSH. 2. Check if box image is available in Libvirt storage pool. If not, upload it to remote Libvirt storage pool as new volume. 3. Create COW diff image of base box image for new Libvirt domain. 4. 
Create and start new domain on Libvirt host. 5. Check for DHCP lease from dnsmasq server. 6. Wait till SSH is available. 7. Sync folders and run Vagrant provisioner on new domain if setup in Vagrantfile. ### Libvirt Configuration ### Provider Options Although it should work without any configuration for most people, this provider exposes quite a few provider-specific configuration options. The following options allow you to configure how vagrant-libvirt connects to libvirt, and are used to generate the [libvirt connection URI](http://libvirt.org/uri.html): * `driver` - A hypervisor name to access. For now only kvm and qemu are supported * `host` - The name of the server, where libvirtd is running * `connect_via_ssh` - If use ssh tunnel to connect to Libvirt. Absolutely needed to access libvirt on remote host. It will not be able to get the IP address of a started VM otherwise. * `username` - Username and password to access Libvirt * `password` - Password to access Libvirt * `id_ssh_key_file` - If not nil, uses this ssh private key to access Libvirt. Default is `$HOME/.ssh/id_rsa`. Prepends `$HOME/.ssh/` if no directory * `socket` - Path to the libvirt unix socket (e.g. `/var/run/libvirt/libvirt-sock`) * `uri` - For advanced usage. Directly specifies what libvirt connection URI vagrant-libvirt should use. Overrides all other connection configuration options Connection-independent options: * `storage_pool_name` - Libvirt storage pool name, where box image and instance snapshots will be stored. For example: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.host = "example.com" end end ``` ### Domain Specific Options * `disk_bus` - The type of disk device to emulate. Defaults to virtio if not set. Possible values are documented in libvirt's [description for _target_](http://libvirt.org/formatdomain.html#elementsDisks). NOTE: this option applies only to disks associated with a box image. To set the bus type on additional disks, see the [Additional Disks](#additional-disks) section. * `disk_device` - The disk device to emulate. Defaults to vda if not set, which should be fine for paravirtualized guests, but some fully virtualized guests may require hda. NOTE: this option also applies only to disks associated with a box image. * `nic_model_type` - parameter specifies the model of the network adapter when you create a domain value by default virtio KVM believe possible values, see the [documentation for libvirt](https://libvirt.org/formatdomain.html#elementsNICSModel). * `memory` - Amount of memory in MBytes. Defaults to 512 if not set. * `cpus` - Number of virtual cpus. Defaults to 1 if not set. * `cputopology` - Number of CPU sockets, cores and threads running per core. All fields of `:sockets`, `:cores` and `:threads` are mandatory, `cpus` domain option must be present and must be equal to total count of **sockets * cores * threads**. For more details see [documentation](https://libvirt.org/formatdomain.html#elementsCPU). ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.cpus = 4 libvirt.cputopology :sockets => '2', :cores => '2', :threads => '1' end end ``` * `nested` - [Enable nested virtualization](https://github.com/torvalds/linux/blob/master/Documentation/virtual/kvm/nested-vmx.txt). Default is false. * `cpu_mode` - [CPU emulation mode](https://libvirt.org/formatdomain.html#elementsCPU). Defaults to 'host-model' if not set. Allowed values: host-model, host-passthrough, custom. * `cpu_model` - CPU Model. 
Defaults to 'qemu64' if not set and `cpu_mode` is `custom` and to '' otherwise. This can really only be used when setting `cpu_mode` to `custom`. * `cpu_fallback` - Whether to allow libvirt to fall back to a CPU model close to the specified model if features in the guest CPU are not supported on the host. Defaults to 'allow' if not set. Allowed values: `allow`, `forbid`. * `numa_nodes` - Specify an array of NUMA nodes for the guest. The syntax is similar to what would be set in the domain XML. `memory` must be in MB. Symmetrical and asymmetrical topologies are supported but make sure your total count of defined CPUs adds up to `v.cpus`. The sum of all the memory defined here will act as your total memory for your guest VM. **This sum will override what is set in `v.memory`** ``` v.cpus = 4 v.numa_nodes = [ {:cpus => "0-1", :memory => "1024"}, {:cpus => "2-3", :memory => "4096"} ] ``` * `loader` - Sets path to custom UEFI loader. * `volume_cache` - Controls the cache mechanism. Possible values are "default", "none", "writethrough", "writeback", "directsync" and "unsafe". [See driver->cache in libvirt documentation](http://libvirt.org/formatdomain.html#elementsDisks). * `kernel` - To launch the guest with a kernel residing on host filesystems. Equivalent to qemu `-kernel`. * `initrd` - To specify the initramfs/initrd to use for the guest. Equivalent to qemu `-initrd`. * `random_hostname` - To create a domain name with extra information on the end to prevent hostname conflicts. * `cmd_line` - Arguments passed on to the guest kernel initramfs or initrd to use. Equivalent to qemu `-append`, only possible to use in combination with `initrd` and `kernel`. * `graphics_type` - Sets the protocol used to expose the guest display. Defaults to `vnc`. Possible values are "sdl", "curses", "none", "gtk", "vnc" or "spice". * `graphics_port` - Sets the port for the display protocol to bind to. Defaults to 5900. * `graphics_ip` - Sets the IP for the display protocol to bind to. Defaults to "127.0.0.1". * `graphics_passwd` - Sets the password for the display protocol. Working for vnc and spice. by default working without passsword. * `graphics_autoport` - Sets autoport for graphics, libvirt in this case ignores graphics_port value, Defaults to 'yes'. Possible value are "yes" and "no" * `keymap` - Set keymap for vm. default: en-us * `kvm_hidden` - [Hide the hypervisor from the guest](https://libvirt.org/formatdomain.html#elementsFeatures). Useful for [GPU passthrough](#pci-device-passthrough) on stubborn drivers. Default is false. * `video_type` - Sets the graphics card type exposed to the guest. Defaults to "cirrus". [Possible values](http://libvirt.org/formatdomain.html#elementsVideo) are "vga", "cirrus", "vmvga", "xen", "vbox", or "qxl". * `video_vram` - Used by some graphics card types to vary the amount of RAM dedicated to video. Defaults to 9216. * `sound_type` - [Set the virtual sound card](https://libvirt.org/formatdomain.html#elementsSound) Defaults to "ich6". * `machine_type` - Sets machine type. Equivalent to qemu `-machine`. Use `qemu-system-x86_64 -machine help` to get a list of supported machines. * `machine_arch` - Sets machine architecture. This helps libvirt to determine the correct emulator type. Possible values depend on your version of qemu. For possible values, see which emulator executable `qemu-system-*` your system provides. 
Common examples are `aarch64`, `alpha`, `arm`, `cris`, `i386`, `lm32`, `m68k`, `microblaze`, `microblazeel`, `mips`, `mips64`, `mips64el`, `mipsel`, `moxie`, `or32`, `ppc`, `ppc64`, `ppcemb`, `s390x`, `sh4`, `sh4eb`, `sparc`, `sparc64`, `tricore`, `unicore32`, `x86_64`, `xtensa`, `xtensaeb`. * `machine_virtual_size` - Sets the disk size in GB for the machine overriding the default specified in the box. Allows boxes to defined with a minimal size disk by default and to be grown to a larger size at creation time. Will ignore sizes smaller than the size specified by the box metadata. Note that currently there is no support for automatically resizing the filesystem to take advantage of the larger disk. * `emulator_path` - Explicitly select which device model emulator to use by providing the path, e.g. `/usr/bin/qemu-system-x86_64`. This is especially useful on systems that fail to select it automatically based on `machine_arch` which then results in a capability error. * `boot` - Change the boot order and enables the boot menu. Possible options are "hd", "network", "cdrom". Defaults to "hd" with boot menu disabled. When "network" is set without "hd", only all NICs will be tried; see below for more detail. * `nic_adapter_count` - Defaults to '8'. Only use case for increasing this count is for VMs that virtualize switches such as Cumulus Linux. Max value for Cumulus Linux VMs is 33. * `uuid` - Force a domain UUID. Defaults to autogenerated value by libvirt if not set. * `suspend_mode` - What is done on vagrant suspend. Possible values: 'pause', 'managedsave'. Pause mode executes a la `virsh suspend`, which just pauses execution of a VM, not freeing resources. Managed save mode does a la `virsh managedsave` which frees resources suspending a domain. * `tpm_model` - The model of the TPM to which you wish to connect. * `tpm_type` - The type of TPM device to which you are connecting. * `tpm_path` - The path to the TPM device on the host system. * `dtb` - The device tree blob file, mostly used for non-x86 platforms. In case the device tree isn't added in-line to the kernel, it can be manually specified here. * `autostart` - Automatically start the domain when the host boots. Defaults to 'false'. * `channel` - [libvirt channels](https://libvirt.org/formatdomain.html#elementCharChannel). Configure a private communication channel between the host and guest, e.g. for use by the [qemu guest agent](http://wiki.libvirt.org/page/Qemu_guest_agent) and the Spice/QXL graphics type. * `mgmt_attach` - Decide if VM has interface in mgmt network. If set to 'false' it is not possible to communicate with VM through `vagrant ssh` or run provisioning. Setting to 'false' is only possible when VM doesn't use box. Defaults set to 'true'. Specific domain settings can be set for each domain separately in multi-VM environment. Example below shows a part of Vagrantfile, where specific options are set for dbserver domain. ```ruby Vagrant.configure("2") do |config| config.vm.define :dbserver do |dbserver| dbserver.vm.box = "centos64" dbserver.vm.provider :libvirt do |domain| domain.memory = 2048 domain.cpus = 2 domain.nested = true domain.volume_cache = 'none' end end # ... ``` The following example shows part of a Vagrantfile that enables the VM to boot from a network interface first and a hard disk second. This could be used to run VMs that are meant to be a PXE booted machines. Be aware that if `hd` is not specified as a boot option, it will never be tried. 
```ruby Vagrant.configure("2") do |config| config.vm.define :pxeclient do |pxeclient| pxeclient.vm.box = "centos64" pxeclient.vm.provider :libvirt do |domain| domain.boot 'network' domain.boot 'hd' end end # ... ``` #### Reload behavior On `vagrant reload` the following domain specific attributes are updated in defined domain: * `disk_bus` - Is updated only on disks. It skips CDROMs * `nic_model_type` - Updated * `memory` - Updated * `cpus` - Updated * `nested` - Updated * `cpu_mode` - Updated. Pay attention that custom mode is not supported * `graphics_type` - Updated * `graphics_port` - Updated * `graphics_ip` - Updated * `graphics_passwd` - Updated * `graphics_autoport` - Updated * `keymap` - Updated * `video_type` - Updated * `video_vram` - Updated * `tpm_model` - Updated * `tpm_type` - Updated * `tpm_path` - Updated ## Networks Networking features in the form of `config.vm.network` support private networks concept. It supports both the virtual network switch routing types and the point to point Guest OS to Guest OS setting using UDP/Mcast/TCP tunnel interfaces. http://wiki.libvirt.org/page/VirtualNetworking https://libvirt.org/formatdomain.html#elementsNICSTCP http://libvirt.org/formatdomain.html#elementsNICSMulticast http://libvirt.org/formatdomain.html#elementsNICSUDP _(in libvirt v1.2.20 and higher)_ Public Network interfaces are currently implemented using the macvtap driver. The macvtap driver is only available with the Linux Kernel version >= 2.6.24. See the following libvirt documentation for the details of the macvtap usage. http://www.libvirt.org/formatdomain.html#elementsNICSDirect An examples of network interface definitions: ```ruby # Private network using virtual network switching config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :ip => "10.20.30.40" end # Private network using DHCP and a custom network config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :type => "dhcp", :libvirt__network_address => '10.20.30.0' end # Private network (as above) using a domain name config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :ip => "10.20.30.40", :libvirt__domain_name => "test.local" end # Private network. Point to Point between 2 Guest OS using a TCP tunnel # Guest 1 config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :libvirt__tunnel_type => 'server', # default is 127.0.0.1 if omitted # :libvirt__tunnel_ip => '127.0.0.1', :libvirt__tunnel_port => '11111' # network with ipv6 support test_vm1.vm.network :private_network, :ip => "10.20.5.42", :libvirt__guest_ipv6 => "yes", :libvirt__ipv6_address => "2001:db8:ca2:6::1", :libvirt__ipv6_prefix => "64" # Guest 2 config.vm.define :test_vm2 do |test_vm2| test_vm2.vm.network :private_network, :libvirt__tunnel_type => 'client', # default is 127.0.0.1 if omitted # :libvirt__tunnel_ip => '127.0.0.1', :libvirt__tunnel_port => '11111' # network with ipv6 support test_vm2.vm.network :private_network, :ip => "10.20.5.45", :libvirt__guest_ipv6 => "yes", :libvirt__ipv6_address => "2001:db8:ca2:6::1", :libvirt__ipv6_prefix => "64" # Public Network config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :public_network, :dev => "virbr0", :mode => "bridge", :type => "bridge" end ``` In example below, one network interface is configured for VM `test_vm1`. After you run `vagrant up`, VM will be accessible on IP address `10.20.30.40`. 
So if you install a web server via provisioner, you will be able to access your testing server on `http://10.20.30.40` URL. But beware that this address is private to libvirt host only. It's not visible outside of the hypervisor box. If network `10.20.30.0/24` doesn't exist, provider will create it. By default created networks are NATed to outside world, so your VM will be able to connect to the internet (if hypervisor can). And by default, DHCP is offering addresses on newly created networks. The second interface is created and bridged into the physical device `eth0`. This mechanism uses the macvtap Kernel driver and therefore does not require an existing bridge device. This configuration assumes that DHCP and DNS services are being provided by the public network. This public interface should be reachable by anyone with access to the public network. ### Private Network Options *Note: These options are not applicable to public network interfaces.* There is a way to pass specific options for libvirt provider when using `config.vm.network` to configure new network interface. Each parameter name starts with `libvirt__` string. Here is a list of those options: * `:libvirt__network_name` - Name of libvirt network to connect to. By default, network 'default' is used. * `:libvirt__netmask` - Used only together with `:ip` option. Default is '255.255.255.0'. * `:libvirt__network_address` - Used only when `:type` is set to `dhcp`. Only `/24` subnet is supported. Default is `172.28.128.0`. * `:libvirt__host_ip` - Address to use for the host (not guest). Default is first possible address (after network address). * `:libvirt__domain_name` - DNS domain of the DHCP server. Used only when creating new network. * `:libvirt__dhcp_enabled` - If DHCP will offer addresses, or not. Used only when creating new network. Default is true. * `:libvirt__dhcp_start` - First address given out via DHCP. Default is third address in range (after network name and gateway). * `:libvirt__dhcp_stop` - Last address given out via DHCP. Default is last possible address in range (before broadcast address). * `:libvirt__dhcp_bootp_file` - The file to be used for the boot image. Used only when dhcp is enabled. * `:libvirt__dhcp_bootp_server` - The server that runs the DHCP server. Used only when dhcp is enabled.By default is the same host that runs the DHCP server. * `:libvirt__adapter` - Number specifiyng sequence number of interface. * `:libvirt__forward_mode` - Specify one of `veryisolated`, `none`, `nat` or `route` options. This option is used only when creating new network. Mode `none` will create isolated network without NATing or routing outside. You will want to use NATed forwarding typically to reach networks outside of hypervisor. Routed forwarding is typically useful to reach other networks within hypervisor. `veryisolated` described [here](https://libvirt.org/formatnetwork.html#examplesNoGateway). By default, option `nat` is used. * `:libvirt__forward_device` - Name of interface/device, where network should be forwarded (NATed or routed). Used only when creating new network. By default, all physical interfaces are used. * `:libvirt__tunnel_type` - Set to 'udp' if using UDP unicast tunnel mode (libvirt v1.2.20 or higher). Set this to either "server" or "client" for tcp tunneling. Set this to 'mcast' if using multicast tunneling. This configuration type uses tunnels to generate point to point connections between Guests. Useful for Switch VMs like Cumulus Linux. 
No virtual switch setting like `libvirt__network_name` applies with tunnel interfaces and will be ignored if configured. * `:libvirt__tunnel_ip` - Sets the source IP of the libvirt tunnel interface. By default this is `127.0.0.1` for TCP and UDP tunnels and `239.255.1.1` for Multicast tunnels. It populates the address field in the `` of the interface xml configuration. * `:libvirt__tunnel_port` - Sets the source port the tcp/udp/mcast tunnel with use. This port information is placed in the `` section of interface xml configuration. * `:libvirt__tunnel_local_port` - Sets the local port used by the udp tunnel interface type. It populates the port field in the `` section of the interface xml configuration. _(This feature only works in libvirt 1.2.20 and higher)_ * `:libvirt__tunnel_local_ip` - Sets the local IP used by the udp tunnel interface type. It populates the ip entry of the `` section of the interface xml configuration. _(This feature only works in libvirt 1.2.20 and higher)_ * `:libvirt__guest_ipv6` - Enable or disable guest-to-guest IPv6 communication. See [here](https://libvirt.org/formatnetwork.html#examplesPrivate6), and [here](http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=705e67d40b09a905cd6a4b8b418d5cb94eaa95a8) for for more information. *Note: takes either 'yes' or 'no' for value* * `:libvirt__ipv6_address` - Define ipv6 address, require also prefix. * `:libvirt__ipv6_prefix` - Define ipv6 prefix. generate string `` * `:libvirt__iface_name` - Define a name for the private network interface. With this feature one can [simulate physical link failures](https://github.com/vagrant-libvirt/vagrant-libvirt/pull/498) * `:mac` - MAC address for the interface. *Note: specify this in lowercase since Vagrant network scripts assume it will be!* * `:libvirt__mtu` - MTU size for the libvirt network, if not defined, the created network will use the libvirt default (1500). VMs still need to set the MTU accordingly. * `:model_type` - parameter specifies the model of the network adapter when you create a domain value by default virtio KVM believe possible values, see the documentation for libvirt * `:libvirt__driver_name` - Define which network driver to use. [More info](https://libvirt.org/formatdomain.html#elementsDriverBackendOptions) * `:libvirt__driver_queues` - Define a number of queues to be used for network interface. Set equal to numer of vCPUs for best performance. [More info](http://www.linux-kvm.org/page/Multiqueue) * `:autostart` - Automatic startup of network by the libvirt daemon. If not specified the default is 'false'. * `:bus` - The bus of the PCI device. Both :bus and :slot have to be defined. * `:slot` - The slot of the PCI device. Both :bus and :slot have to be defined. When the option `:libvirt__dhcp_enabled` is to to 'false' it shouldn't matter whether the virtual network contains a DHCP server or not and vagrant-libvirt should not fail on it. The only situation where vagrant-libvirt should fail is when DHCP is requested but isn't configured on a matching already existing virtual network. ### Public Network Options * `:dev` - Physical device that the public interface should use. Default is 'eth0'. * `:mode` - The mode in which the public interface should operate in. Supported modes are available from the [libvirt documentation](http://www.libvirt.org/formatdomain.html#elementsNICSDirect). Default mode is 'bridge'. * `:type` - is type of interface.(``) * `:mac` - MAC address for the interface. * `:network_name` - Name of libvirt network to connect to. 
* `:portgroup` - Name of libvirt portgroup to connect to. * `:ovs` - Support to connect to an Open vSwitch bridge device. Default is 'false'. * `:trust_guest_rx_filters` - Support trustGuestRxFilters attribute. Details are listed [here](http://www.libvirt.org/formatdomain.html#elementsNICSDirect). Default is 'false'. ### Management Network vagrant-libvirt uses a private network to perform some management operations on VMs. All VMs will have an interface connected to this network and an IP address dynamically assigned by libvirt unless you set `:mgmt_attach` to 'false'. This is in addition to any networks you configure. The name and address used by this network are configurable at the provider level. * `management_network_name` - Name of libvirt network to which all VMs will be connected. If not specified the default is 'vagrant-libvirt'. * `management_network_address` - Address of network to which all VMs will be connected. Must include the address and subnet mask. If not specified the default is '192.168.121.0/24'. * `management_network_mode` - Network mode for the libvirt management network. Specify one of veryisolated, none, nat or route options. Further documentated under [Private Networks](#private-network-options) * `management_network_guest_ipv6` - Enable or disable guest-to-guest IPv6 communication. See [here](https://libvirt.org/formatnetwork.html#examplesPrivate6), and [here](http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=705e67d40b09a905cd6a4b8b418d5cb94eaa95a8) for for more information. * `management_network_autostart` - Automatic startup of mgmt network, if not specified the default is 'false'. * `:management_network_pci_bus` - The bus of the PCI device. * `:management_network_pci_slot` - The slot of the PCI device. * `management_network_mac` - MAC address of management network interface. You may wonder how vagrant-libvirt knows the IP address a VM received. Libvirt doesn't provide a standard way to find out the IP address of a running domain. But we do know the MAC address of the virtual machine's interface on the management network. Libvirt is closely connected with dnsmasq, which acts as a DHCP server. dnsmasq writes lease information in the `/var/lib/libvirt/dnsmasq` directory. Vagrant-libvirt looks for the MAC address in this file and extracts the corresponding IP address. ## Additional Disks You can create and attach additional disks to a VM via `libvirt.storage :file`. It has a number of options: * `path` - Location of the disk image. If unspecified, a path is automtically chosen in the same storage pool as the VMs primary disk. * `device` - Name of the device node the disk image will have in the VM, e.g. *vdb*. If unspecified, the next available device is chosen. * `size` - Size of the disk image. If unspecified, defaults to 10G. * `type` - Type of disk image to create. Defaults to *qcow2*. * `bus` - Type of bus to connect device to. Defaults to *virtio*. * `cache` - Cache mode to use, e.g. `none`, `writeback`, `writethrough` (see the [libvirt documentation for possible values](http://libvirt.org/formatdomain.html#elementsDisks) or [here](https://www.suse.com/documentation/sles11/book_kvm/data/sect1_chapter_book_kvm.html) for a fuller explanation). Defaults to *default*. * `allow_existing` - Set to true if you want to allow the VM to use a pre-existing disk. If the disk doesn't exist it will be created. Disks with this option set to true need to be removed manually. * `shareable` - Set to true if you want to simulate shared SAN storage. 
* `serial` - Serial number of the disk device. The following example creates two additional disks. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :size => '20G' libvirt.storage :file, :size => '40G', :type => 'raw' end end ``` For shared SAN storage to work the following example can be used: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :size => '20G', :path => 'my_shared_disk.img', :allow_existing => true, :shareable => true, :type => 'raw' end end ``` ### Reload behavior On `vagrant reload` the following additional disk attributes are updated in defined domain: * `bus` - Updated. Uses `device` as a search marker. It is not required to define `device`, but it's recommended. If `device` is defined then the order of addtitional disk definition becomes irrelevant. ## CDROMs You can attach up to four CDROMs to a VM via `libvirt.storage :file, :device => :cdrom`. Available options are: * `path` - The path to the iso to be used for the CDROM drive. * `dev` - The device to use (`hda`, `hdb`, `hdc`, or `hdd`). This will be automatically determined if unspecified. * `bus` - The bus to use for the CDROM drive. Defaults to `ide` The following example creates three CDROM drives in the VM: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :device => :cdrom, :path => '/path/to/iso1.iso' libvirt.storage :file, :device => :cdrom, :path => '/path/to/iso2.iso' libvirt.storage :file, :device => :cdrom, :path => '/path/to/iso3.iso' end end ``` ## Input You can specify multiple inputs to the VM via `libvirt.input`. Available options are listed below. Note that both options are required: * `type` - The type of the input * `bus` - The bus of the input ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # this is the default # libvirt.input :type => "mouse", :bus => "ps2" # very useful when having mouse issues when viewing VM via VNC libvirt.input :type => "tablet", :bus => "usb" end end ``` ## PCI device passthrough You can specify multiple PCI devices to passthrough to the VM via `libvirt.pci`. Available options are listed below. Note that all options are required: * `bus` - The bus of the PCI device * `slot` - The slot of the PCI device * `function` - The function of the PCI device You can extract that information from output of `lspci` command. First characters of each line are in format `[]:[].[]`. For example: ```shell $ lspci| grep NVIDIA 03:00.0 VGA compatible controller: NVIDIA Corporation GK110B [GeForce GTX TITAN Black] (rev a1) ``` In that case `bus` is `0x03`, `slot` is `0x00` and `function` is `0x0`. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.pci :bus => '0x06', :slot => '0x12', :function => '0x5' # Add another one if it is neccessary libvirt.pci :bus => '0x03', :slot => '0x00', :function => '0x0' end end ``` Note! Above options affect configuration only at domain creation. It won't change VM behaviour on `vagrant reload` after domain was created. Don't forget to [set](#domain-specific-options) `kvm_hidden` option to `true` especially if you are passthroughing NVIDIA GPUs. Otherwise GPU is visible from VM but cannot be operated. ## USB Controller Configuration The USB controller can be configured using `libvirt.usb_controller`, with the following options: * `model` - The USB controller device model to emulate. 
(mandatory) * `ports` - The number of devices that can be connected to the controller. See the [libvirt documentation](https://libvirt.org/formatdomain.html#elementsControllers) for a list of valid models. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Set up a USB3 controller libvirt.usb_controller :model => "nec-xhci" end end ``` ## USB Redirector Devices You can specify multiple redirect devices via `libvirt.redirdev`. There are two types, `tcp` and `spicevmc` supported, for forwarding USB-devices to the guest. Available options are listed below. * `type` - The type of the USB redirector device. (`tcp` or `spicevmc`) * `host` - The host where the device is attached to. (mandatory for type `tcp`) * `port` - The port where the device is listening. (mandatory for type `tcp`) ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # add two devices using spicevmc channel (1..2).each do libvirt.redirdev :type => "spicevmc" end # add device, provided by localhost:4000 libvirt.redirdev :type => "tcp", :host => "localhost", :port => "4000" end end ``` ### Filter for USB Redirector Devices You can define filter for redirected devices. These filters can be positiv or negative, by setting the mandatory option `allow=yes` or `allow=no`. All available options are listed below. Note the option `allow` is mandatory. * `class` - The device class of the USB device. A list of device classes is available on [Wikipedia](https://en.wikipedia.org/wiki/USB#Device_classes). * `vendor` - The vendor of the USB device. * `product` - The product id of the USB device. * `version` - The version of the USB device. Note that this is the version of `bcdDevice` * `allow` - allow or disallow redirecting this device. (mandatory) You can extract that information from output of `lsusb` command. Every line contains the information in format `Bus [] Device []: ID [:[]`. The `version` can be extracted from the detailed output of the device using `lsusb -D /dev/usb/[]/[]`. For example: ```shell # get bcdDevice from $: lsusb Bus 001 Device 009: ID 08e6:3437 Gemalto (was Gemplus) GemPC Twin SmartCard Reader $: lsusb -D /dev/bus/usb/001/009 | grep bcdDevice bcdDevice 2.00 ``` In this case, the USB device with `class 0x0b`, `vendor 0x08e6`, `product 0x3437` and `bcdDevice version 2.00` is allowed to be redirected to the guest. All other devices will be refused. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.redirdev :type => "spicevmc" libvirt.redirfilter :class => "0x0b" :vendor => "0x08e6" :product => "0x3437" :version => "2.00" :allow => "yes" libvirt.redirfilter :allow => "no" end end ``` ## Random number generator passthrough You can pass through `/dev/random` to your VM by configuring the domain like this: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Pass through /dev/random from the host to the VM libvirt.random :model => 'random' end end ``` At the moment only the `random` backend is supported. ## Watchdog device A virtual hardware watchdog device can be added to the guest via the `libvirt.watchdog` element. The option `model` is mandatory and could have on of the following values. * `i6300esb` - the recommended device, emulating a PCI Intel 6300ESB * 'ib700` - emulating an ISA iBase IB700 * `diag288` - emulating an S390 DIAG288 device The optional action attribute describes what `action` to take when the watchdog expires. 
Valid values are specific to the underlying hypervisor. The default behavior is `reset`. * `reset` - default, forcefully reset the guest * `shutdown` - gracefully shutdown the guest (not recommended) * `poweroff` - forcefully power off the guest * `pause` - pause the guest * `none` - do nothing * `dump` - automatically dump the guest * `inject-nmi` - inject a non-maskable interrupt into the guest ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Add libvirt watchdog device model i6300esb libvirt.watchdog :model => 'i6300esb', :action => 'reset' end end ``` ## Smartcard device A virtual smartcard device can be supplied to the guest via the `libvirt.smartcard` element. The option `mode` is mandatory and currently only value `passthrough` is supported. The value `spicevmc` for option `type` is default value and can be supressed. On using `type = tcp`, the options `source_mode`, `source_host` and `source_service` are mandatory. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Add smartcard device with type 'spicevmc' libvirt.smartcard :mode => 'passthrough', :type => 'spicevmc' end end ``` ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Add smartcard device with type 'tcp' domain.smartcard :mode => 'passthrough', :type => 'tcp', :source_mode => 'bind', :source_host => '127.0.0.1', :source_service => '2001' end end ``` ## Hypervisor Features Hypervisor features can be specified via `libvirt.features` as a list. The default options that are enabled are `acpi`, `apic` and `pae`. If you define `libvirt.features` you overwrite the defaults, so keep that in mind. An example: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Specify the default hypervisor features libvirt.features = ['acpi', 'apic', 'pae' ] end end ``` A different example for ARM boards: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Specify the default hypervisor features libvirt.features = ["apic", "gic version='2'" ] end end ``` You can also specify a special set of features that help improve the behavior of guests running Microsoft Windows. You can specify HyperV features via `libvirt.hyperv_feature`. Available options are listed below. Note that both options are required: * `name` - The name of the feature Hypervisor feature (see libvirt doc) * `state` - The state for this feature which can be either `on` or `off`. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Relax constraints on timers libvirt.hyperv_feature :name => 'relaxed', :state => 'on' # Enable virtual APIC libvirt.hyperv_feature :name => 'vapic', :state => 'on' end end ``` ## CPU features You can specify CPU feature policies via `libvirt.cpu_feature`. Available options are listed below. Note that both options are required: * `name` - The name of the feature for the chosen CPU (see libvirts `cpu_map.xml`) * `policy` - The policy for this feature (one of `force`, `require`, `optional`, `disable` and `forbid` - see libvirt documentation) ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # The feature will not be supported by virtual CPU. libvirt.cpu_feature :name => 'hypervisor', :policy => 'disable' # Guest creation will fail unless the feature is supported by host CPU. 
libvirt.cpu_feature :name => 'vmx', :policy => 'require' # The virtual CPU will claim the feature is supported regardless of it being supported by host CPU. libvirt.cpu_feature :name => 'pdpe1gb', :policy => 'force' end end ``` ## Memory Backing You can specify memoryBacking options via `libvirt.memorybacking`. Available options are shown below. Full documentation is available at the [libvirt _memoryBacking_ section](https://libvirt.org/formatdomain.html#elementsMemoryBacking). NOTE: The hugepages `` element is not yet supported ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.memorybacking :hugepages libvirt.memorybacking :nosharepages libvirt.memorybacking :locked libvirt.memorybacking :source, :type => 'file' libvirt.memorybacking :access, :mode => 'shared' libvirt.memorybacking :allocation, :mode => 'immediate' end end ``` ## USB device passthrough You can specify multiple USB devices to passthrough to the VM via `libvirt.usb`. The device can be specified by the following options: * `bus` - The USB bus ID, e.g. "1" * `device` - The USB device ID, e.g. "2" * `vendor` - The USB devices vendor ID (VID), e.g. "0x1234" * `product` - The USB devices product ID (PID), e.g. "0xabcd" At least one of these has to be specified, and `bus` and `device` may only be used together. The example values above match the device from the following output of `lsusb`: ``` Bus 001 Device 002: ID 1234:abcd Example device ``` Additionally, the following options can be used: * `startupPolicy` - Is passed through to libvirt and controls if the device has to exist. libvirt currently allows the following values: "mandatory", "requisite", "optional". ## No box and PXE boot There is support for PXE booting VMs with no disks as well as PXE booting VMs with blank disks. There are some limitations: * Requires Vagrant 1.6.0 or newer * No provisioning scripts are ran * No network configuration is being applied to the VM * No SSH connection can be made * `vagrant halt` will only work cleanly if the VM handles ACPI shutdown signals In short, VMs without a box can be created, halted and destroyed but all other functionality cannot be used. An example for a PXE booted VM with no disks whatsoever: ```ruby Vagrant.configure("2") do |config| config.vm.define :pxeclient do |pxeclient| pxeclient.vm.provider :libvirt do |domain| domain.boot 'network' end end end ``` And an example for a PXE booted VM with no box but a blank disk which will boot from this HD if the NICs fail to PXE boot: ```ruby Vagrant.configure("2") do |config| config.vm.define :pxeclient do |pxeclient| pxeclient.vm.provider :libvirt do |domain| domain.storage :file, :size => '100G', :type => 'qcow2' domain.boot 'network' domain.boot 'hd' end end end ``` Example for vm with 2 networks and only 1 is bootable and has dhcp server in this subnet, for example foreman with dhcp server Name of network "foreman_managed" is key for define boot order ```ruby config.vm.define :pxeclient do |pxeclient| pxeclient.vm.network :private_network,ip: '10.0.0.5', libvirt__network_name: "foreman_managed", libvirt__dhcp_enabled: false, libvirt__host_ip: '10.0.0.1' pxeclient.vm.provider :libvirt do |domain| domain.memory = 1000 boot_network = {'network' => 'foreman_managed'} domain.storage :file, :size => '100G', :type => 'qcow2' domain.boot boot_network domain.boot 'hd' end end ``` ## SSH Access To VM vagrant-libvirt supports vagrant's [standard ssh settings](https://docs.vagrantup.com/v2/vagrantfile/ssh_settings.html). 
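For example, if a box uses a non-default user or key, the usual Vagrant ssh options apply unchanged. The values below are placeholders, not requirements of this provider:

```ruby
Vagrant.configure("2") do |config|
  config.vm.box = "fedora/24-cloud-base"

  # Standard Vagrant ssh settings; nothing libvirt-specific is needed here.
  config.ssh.username = "admin"                     # hypothetical user
  config.ssh.private_key_path = "~/.ssh/custom_key" # hypothetical key path
  config.ssh.forward_agent = true
end
```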
## Forwarded Ports vagrant-libvirt supports Forwarded Ports via ssh port forwarding. Please note that due to a well known limitation only the TCP protocol is supported. For each `forwarded_port` directive you specify in your Vagrantfile, vagrant-libvirt will maintain an active ssh process for the lifetime of the VM. If your VM should happen to be rebooted, the SSH session will need to be restablished by halting the VM and bringing it back up. vagrant-libvirt supports an additional `forwarded_port` option `gateway_ports` which defaults to `false`, but can be set to `true` if you want the forwarded port to be accessible from outside the Vagrant host. In this case you should also set the `host_ip` option to `'*'` since it defaults to `'localhost'`. You can also provide a custom adapter to forward from by 'adapter' option. Default is `eth0`. **Internally Accessible Port Forward** `config.vm.network :forwarded_port, guest: 80, host: 2000` **Externally Accessible Port Forward** `config.vm.network :forwarded_port, guest: 80, host: 2000, host_ip: "0.0.0.0"` ## Synced Folders Vagrant automatically syncs the project folder on the host to `/vagrant` in the guest. You can also configure additional synced folders. `vagrant-libvirt` supports bidirectional synced folders via [NFS](https://en.wikipedia.org/wiki/Network_File_System) or [VirtFS](http://www.linux-kvm.org/page/VirtFS) ([9p or Plan 9](https://en.wikipedia.org/wiki/9P_(protocol))) and unidirectional via rsync. The default is NFS. Difference between NFS and 9p is explained [here](https://unix.stackexchange.com/questions/240281/virtfs-plan-9-vs-nfs-as-tool-for-share-folder-for-virtual-machine). You can change the synced folder type for `/vagrant` by explicity configuring it an setting the type, e.g. ```shell config.vm.synced_folder './', '/vagrant', type: 'rsync' ``` or ```shell config.vm.synced_folder './', '/vagrant', type: '9p', disabled: false, accessmode: "squash", owner: "1000" ``` or ```shell config.vm.synced_folder './', '/vagrant', type: '9p', disabled: false, accessmode: "mapped", mount: false ``` For 9p shares, a `mount: false` option allows to define synced folders without mounting them at boot. Further documentation on using 9p can be found in [kernel docs](https://www.kernel.org/doc/Documentation/filesystems/9p.txt) and in [QEMU wiki](https://wiki.qemu.org/Documentation/9psetup#Starting_the_Guest_directly). Please do note that 9p depends on support in the guest and not all distros come with the 9p module by default. **SECURITY NOTE:** for remote libvirt, nfs synced folders requires a bridged public network interface and you must connect to libvirt via ssh. ## QEMU Session Support vagrant-libvirt supports using the QEMU session connection to maintain Vagrant VMs. As the session connection does not have root access to the system features which require root will not work. Access to networks created by the system QEMU connection can be granted by using the [QEMU bridge helper](https://wiki.qemu.org/Features/HelperNetworking). The bridge helper is enabled by default on some distros but may need to be enabled/installed on others. 
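As a rough sketch (file locations and bridge names vary by distro, so treat these as assumptions to verify), granting session VMs access to the system `virbr0` bridge usually means whitelisting it in the bridge helper's ACL file:

```shell
# Allow qemu-bridge-helper to attach session VMs to virbr0
# (the ACL file may live elsewhere on some distros, e.g. /etc/qemu-kvm/bridge.conf)
echo "allow virbr0" | sudo tee -a /etc/qemu/bridge.conf
```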
An example configuration of a machine using the QEMU session connection: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Use QEMU session instead of system connection libvirt.qemu_use_session = true # URI of QEMU session connection, default is as below libvirt.uri = 'qemu:///session' # URI of QEMU system connection, use to obtain IP address for management libvirt.system_uri = 'qemu:///system' # Path to store libvirt images for the virtual machine, default is as ~/.local/share/libvirt/images libvirt.storage_pool_path = '/home/user/.local/share/libvirt/images' # Management network device libvirt.management_network_device = 'virbr0' end # Public network configuration using existing network device # Note: Private networks do not work with QEMU session enabled as root access is required to create new network devices config.vm.network :public_network, :dev => "virbr1", :mode => "bridge", :type => "bridge" end ``` ## Customized Graphics vagrant-libvirt supports customizing the display and video settings of the managed guest. This is probably most useful for VNC-type displays with multiple guests. It lets you specify the exact port for each guest to use deterministically. Here is an example of using custom display options: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.graphics_port = 5901 libvirt.graphics_ip = '0.0.0.0' libvirt.video_type = 'qxl' end end ``` ## TPM Devices Modern versions of Libvirt support connecting to TPM devices on the host system. This allows you to enable Trusted Boot Extensions, among other features, on your guest VMs. In general, you will only need to modify the `tpm_path` variable in your guest configuration. However, advanced usage, such as the application of a Software TPM, may require modifying the `tpm_model` and `tpm_type` variables. The TPM options will only be used if you specify a TPM path. Declarations of any TPM options without specifying a path will result in those options being ignored. Here is an example of using the TPM options: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.tpm_model = 'tpm-tis' libvirt.tpm_type = 'passthrough' libvirt.tpm_path = '/dev/tpm0' end end ``` ## Libvirt communication channels For certain functionality to be available within a guest, a private communication channel must be established with the host. Two notable examples of this are the qemu guest agent, and the Spice/QXL graphics type. Below is a simple example which exposes a virtio serial channel to the guest. Note: in a multi-VM environment, the channel would be created for all VMs. ```ruby vagrant.configure(2) do |config| config.vm.provider :libvirt do |libvirt| libvirt.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' end end ``` Below is the syntax for creating a spicevmc channel for use by a qxl graphics card. ```ruby vagrant.configure(2) do |config| config.vm.provider :libvirt do |libvirt| libvirt.channel :type => 'spicevmc', :target_name => 'com.redhat.spice.0', :target_type => 'virtio' end end ``` These settings can be specified on a per-VM basis, however the per-guest settings will OVERRIDE any global 'config' setting. In the following example, we create 3 VM with the following configuration: * **master**: No channel settings specified, so we default to the provider setting of a single virtio guest agent channel. 
* **node1**: Override the channel setting, setting both the guest agent channel, and a spicevmc channel * **node2**: Override the channel setting, setting both the guest agent channel, and a 'guestfwd' channel. TCP traffic sent by the guest to the given IP address and port is forwarded to the host socket `/tmp/foo`. Note: this device must be unique for each VM. For example: ```ruby Vagrant.configure(2) do |config| config.vm.box = "fedora/24-cloud-base" config.vm.provider :libvirt do |libvirt| libvirt.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' end config.vm.define "master" do |master| master.vm.provider :libvirt do |domain| domain.memory = 1024 end end config.vm.define "node1" do |node1| node1.vm.provider :libvirt do |domain| domain.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' domain.channel :type => 'spicevmc', :target_name => 'com.redhat.spice.0', :target_type => 'virtio' end end config.vm.define "node2" do |node2| node2.vm.provider :libvirt do |domain| domain.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' domain.channel :type => 'unix', :target_type => 'guestfwd', :target_address => '192.0.2.42', :target_port => '4242', :source_path => '/tmp/foo' end end end ``` ## Custom command line arguments You can also specify multiple qemuargs arguments for qemu-system * `value` - Value ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.qemuargs :value => "-device" libvirt.qemuargs :value => "intel-iommu" end end ``` ## Box Format You can view an example box in the [`example_box/directory`](https://github.com/vagrant-libvirt/vagrant-libvirt/tree/master/example_box). That directory also contains instructions on how to build a box. The box is a tarball containing: * qcow2 image file named `box.img` * `metadata.json` file describing box image (`provider`, `virtual_size`, `format`) * `Vagrantfile` that does default settings for the provider-specific configuration for this provider ## Create Box To create a vagrant-libvirt box from a qcow2 image, run `create_box.sh` (located in the tools directory): ```shell $ create_box.sh ubuntu14.qcow2 ``` You can also create a box by using [Packer](https://packer.io). Packer templates for use with vagrant-libvirt are available at https://github.com/jakobadam/packer-qemu-templates. After cloning that project you can build a vagrant-libvirt box by running: ```shell $ cd packer-qemu-templates $ packer build ubuntu-14.04-server-amd64-vagrant.json ``` ## Development To work on the `vagrant-libvirt` plugin, clone this repository out, and use [Bundler](http://gembundler.com) to get the dependencies: ```shell $ git clone https://github.com/vagrant-libvirt/vagrant-libvirt.git $ cd vagrant-libvirt $ bundle install ``` Once you have the dependencies, verify the unit tests pass with `rspec`: ```shell $ bundle exec rspec spec/ ``` If those pass, you're ready to start developing the plugin. You can test the plugin without installing it into your Vagrant environment by just creating a `Vagrantfile` in the top level of this directory (it is gitignored) that uses it. Don't forget to add following line at the beginning of your `Vagrantfile` while in development mode: ```ruby Vagrant.require_plugin "vagrant-libvirt" ``` Now you can use bundler to execute Vagrant: ```shell $ bundle exec vagrant up --provider=libvirt ``` **IMPORTANT NOTE:** bundle is crucial. You need to use bundled Vagrant. 
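When working on a single area it is often quicker to run just one spec file, for example the domain template spec shipped in this repository:

```shell
$ bundle exec rspec spec/unit/templates/domain_spec.rb
```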
## Contributing

1. Fork it
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create new Pull Request

vagrant-libvirt-0.0.45/spec/0000755000004100000410000000000013363570025015701 5ustar www-datawww-datavagrant-libvirt-0.0.45/spec/unit/0000755000004100000410000000000013363570025016660 5ustar www-datawww-datavagrant-libvirt-0.0.45/spec/unit/templates/0000755000004100000410000000000013363570025020656 5ustar www-datawww-datavagrant-libvirt-0.0.45/spec/unit/templates/domain_all_settings.xml0000644000004100000410000000732113363570025025422 0ustar www-datawww-data 1 qemu64 hvm /efi/loader /usr/bin/kvm-spice /dev/random
vagrant-libvirt-0.0.45/spec/unit/templates/domain_defaults.xml0000644000004100000410000000143113363570025024535 0ustar www-datawww-data 1 hvm vagrant-libvirt-0.0.45/spec/unit/templates/domain_custom_cpu_model.xml0000644000004100000410000000144013363570025026267 0ustar www-datawww-data 1 SandyBridge hvm vagrant-libvirt-0.0.45/spec/unit/templates/domain_spec.rb0000644000004100000410000000672313363570025023474 0ustar www-datawww-datarequire 'support/sharedcontext' require 'vagrant-libvirt/config' require 'vagrant-libvirt/util/erb_template' describe 'templates/domain' do include_context 'unit' class DomainTemplateHelper < VagrantPlugins::ProviderLibvirt::Config include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate def finalize! super @qargs = @qemu_args end end let(:domain) { DomainTemplateHelper.new } let(:xml_expected) { File.read(File.join(File.dirname(__FILE__), test_file)) } context 'when only defaults used' do let(:test_file) { 'domain_defaults.xml' } it 'renders template' do domain.finalize! expect(domain.to_xml('domain')).to eq xml_expected end end context 'when all settings enabled' do before do domain.instance_variable_set('@domain_type', 'kvm') domain.cpu_mode = 'custom' domain.cpu_feature(name: 'AAA', policy: 'required') domain.hyperv_feature(name: 'BBB', state: 'on') domain.cputopology(sockets: '1', cores: '3', threads: '2') domain.machine_type = 'pc-compatible' domain.machine_arch = 'x86_64' domain.loader = '/efi/loader' domain.boot('network') domain.boot('cdrom') domain.boot('hd') domain.emulator_path = '/usr/bin/kvm-spice' domain.instance_variable_set('@domain_volume_path', '/var/lib/libvirt/images/test.qcow2') domain.instance_variable_set('@domain_volume_cache', 'unsafe') domain.disk_bus = 'ide' domain.disk_device = 'vda' domain.storage(:file, path: 'test-disk1.qcow2') domain.storage(:file, path: 'test-disk2.qcow2') domain.disks.each do |disk| disk[:absolute_path] = '/var/lib/libvirt/images/' + disk[:path] end domain.storage(:file, device: :cdrom) domain.storage(:file, device: :cdrom) domain.channel(type: 'unix', target_name: 'org.qemu.guest_agent.0', target_type: 'virtio') domain.channel(type: 'spicevmc', target_name: 'com.redhat.spice.0', target_type: 'virtio') domain.channel(type: 'unix', target_type: 'guestfwd', target_address: '192.0.2.42', target_port: '4242', source_path: '/tmp/foo') domain.random(model: 'random') domain.pci(bus: '0x06', slot: '0x12', function: '0x5') domain.pci(bus: '0x03', slot: '0x00', function: '0x0') domain.usb_controller(model: 'nec-xhci', ports: '4') domain.usb(bus: '1', device: '2', vendor: '0x1234', product: '0xabcd') domain.redirdev(type: 'tcp', host: 'localhost', port: '4000') domain.redirfilter(class: '0x0b', vendor: '0x08e6', product: '0x3437', version: '2.00', allow: 'yes') domain.watchdog(model: 'i6300esb', action: 'reset') domain.smartcard(mode: 'passthrough') domain.tpm_path = '/dev/tpm0' domain.qemuargs(value: '-device') domain.qemuargs(value: 'dummy-device') end let(:test_file) { 'domain_all_settings.xml' } it 'renders template' do domain.finalize! expect(domain.to_xml('domain')).to eq xml_expected end end context 'when custom cpu model enabled' do before do domain.cpu_mode = 'custom' domain.cpu_model = 'SandyBridge' end let(:test_file) { 'domain_custom_cpu_model.xml' } it 'renders template' do domain.finalize! 
expect(domain.to_xml('domain')).to eq xml_expected end end end vagrant-libvirt-0.0.45/spec/unit/config_spec.rb0000644000004100000410000000704413363570025021471 0ustar www-datawww-datarequire 'spec_helper' require 'support/sharedcontext' require 'vagrant-libvirt/config' describe VagrantPlugins::ProviderLibvirt::Config do include_context 'unit' def assert_invalid errors = subject.validate(machine) raise "No errors: #{errors.inspect}" if errors.values.all?(&:empty?) end def assert_valid errors = subject.validate(machine) raise "Errors: #{errors.inspect}" unless errors.values.all?(&:empty?) end describe '#validate' do it 'is valid with defaults' do assert_valid end context 'with disks defined' do before { expect(machine).to receive(:provider_config).and_return(subject).at_least(:once) } it 'is valid if relative path used for disk' do subject.storage :file, path: '../path/to/file.qcow2' assert_valid end it 'should be invalid if absolute path used for disk' do subject.storage :file, path: '/absolute/path/to/file.qcow2' assert_invalid end end context 'with mac defined' do let (:vm) { double('vm') } before { expect(machine.config).to receive(:vm).and_return(vm) } it 'is valid with valid mac' do expect(vm).to receive(:networks).and_return([[:public, { mac: 'aa:bb:cc:dd:ee:ff' }]]) assert_valid end it 'is valid with MAC containing no delimiters' do network = [:public, { mac: 'aabbccddeeff' }] expect(vm).to receive(:networks).and_return([network]) assert_valid expect(network[1][:mac]).to eql('aa:bb:cc:dd:ee:ff') end it 'should be invalid if MAC not formatted correctly' do expect(vm).to receive(:networks).and_return([[:public, { mac: 'aa/bb/cc/dd/ee/ff' }]]) assert_invalid end end end describe '#merge' do let(:one) { described_class.new } let(:two) { described_class.new } subject { one.merge(two) } context 'storage' do context 'with disks' do context 'assigned specific devices' do it 'should merge disks with specific devices' do one.storage(:file, device: 'vdb') two.storage(:file, device: 'vdc') subject.finalize! expect(subject.disks).to include(include(device: 'vdb'), include(device: 'vdc')) end end context 'without devices given' do it 'should merge disks with different devices assigned automatically' do one.storage(:file) two.storage(:file) subject.finalize! expect(subject.disks).to include(include(device: 'vdb'), include(device: 'vdc')) end end end context 'with cdroms only' do context 'assigned specific devs' do it 'should merge disks with specific devices' do one.storage(:file, device: :cdrom, dev: 'hda') two.storage(:file, device: :cdrom, dev: 'hdb') subject.finalize! expect(subject.cdroms).to include(include(dev: 'hda'), include(dev: 'hdb')) end end context 'without devs given' do it 'should merge cdroms with different devs assigned automatically' do one.storage(:file, device: :cdrom) two.storage(:file, device: :cdrom) subject.finalize! 
expect(subject.cdroms).to include(include(dev: 'hda'), include(dev: 'hdb')) end end end end end end vagrant-libvirt-0.0.45/spec/unit/action/0000755000004100000410000000000013363570025020135 5ustar www-datawww-datavagrant-libvirt-0.0.45/spec/unit/action/wait_till_up_spec.rb0000644000004100000410000001122313363570025024167 0ustar www-datawww-datarequire 'vagrant-libvirt/action/wait_till_up' require 'vagrant-libvirt/errors' require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' describe VagrantPlugins::ProviderLibvirt::Action::WaitTillUp do subject { described_class.new(app, env) } include_context 'vagrant-unit' include_context 'libvirt' include_context 'unit' describe '#call' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:get_domain).and_return(domain) allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver).to receive(:state) .and_return(:running) end context 'when machine does not exist' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:get_domain).and_return(nil) end it 'raises exception' do expect(app).to_not receive(:call) expect { subject.call(env) }.to raise_error(::VagrantPlugins::ProviderLibvirt::Errors::NoDomainError, /No domain found. Domain dummy-vagrant_dummy not found/) end end context 'when machine is booting' do context 'if interrupted looking for IP' do before do env[:interrupted] = true end it 'should exit' do expect(app).to_not receive(:call) expect(ui).to receive(:info).with('Waiting for domain to get an IP address...') expect(subject.call(env)).to be_nil end end context 'if interrupted waiting for SSH' do before do allow(domain).to receive(:wait_for).and_return(true) allow(env).to receive(:[]).and_call_original allow(env).to receive(:[]).with(:interrupted).and_return(false, true, true) allow(env).to receive(:[]).with(:ip_address).and_return('192.168.121.2') end it 'should exit after getting IP' do expect(app).to_not receive(:call) expect(ui).to receive(:info).with('Waiting for domain to get an IP address...') expect(ui).to receive(:info).with('Waiting for SSH to become available...') logger = subject.instance_variable_get(:@logger) expect(logger).to receive(:debug).with(/Searching for IP for MAC address: .*/) expect(logger).to receive(:info).with('Got IP address 192.168.121.2') expect(logger).to receive(:info).with(/Time for getting IP: .*/) expect(env[:machine].communicate).to_not receive(:ready?) 
expect(subject.call(env)).to be_nil end end end context 'when machine boots and ssh available' do before do allow(domain).to receive(:wait_for).and_return(true) allow(env).to receive(:[]).and_call_original allow(env).to receive(:[]).with(:interrupted).and_return(false) allow(env).to receive(:[]).with(:ip_address).and_return('192.168.121.2') end it 'should call the next hook' do expect(app).to receive(:call) expect(ui).to receive(:info).with('Waiting for domain to get an IP address...') expect(ui).to receive(:info).with('Waiting for SSH to become available...') expect(env[:machine].communicate).to receive(:ready?).and_return(true) expect(subject.call(env)).to be_nil end end end describe '#recover' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver).to receive(:get_domain).and_return(machine) allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver).to receive(:state) .and_return(:not_created) allow(env).to receive(:[]).and_call_original end it 'should do nothing by default' do expect(env).to_not receive(:[]).with(:action_runner) # cleanup expect(subject.recover(env)).to be_nil end context 'with machine coming up' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver).to receive(:state) .and_return(:running) env[:destroy_on_error] = true end context 'and user has disabled destroy on failure' do before do env[:destroy_on_error] = false end it 'skips terminate on failure' do expect(env).to_not receive(:[]).with(:action_runner) # cleanup expect(subject.recover(env)).to be_nil end end context 'and using default settings' do let(:runner) { double('runner') } it 'deletes VM on failure' do expect(env).to receive(:[]).with(:action_runner).and_return(runner) # cleanup expect(runner).to receive(:run) expect(subject.recover(env)).to be_nil end end end end end vagrant-libvirt-0.0.45/spec/unit/action/destroy_domain_spec.rb0000644000004100000410000000625613363570025024525 0ustar www-datawww-datarequire 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/action/destroy_domain' describe VagrantPlugins::ProviderLibvirt::Action::DestroyDomain do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' let(:libvirt_domain) { double('libvirt_domain') } let(:libvirt_client) { double('libvirt_client') } let(:servers) { double('servers') } describe '#call' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:connection).and_return(connection) allow(connection).to receive(:client).and_return(libvirt_client) allow(libvirt_client).to receive(:lookup_domain_by_uuid) .and_return(libvirt_domain) allow(connection).to receive(:servers).and_return(servers) allow(servers).to receive(:get).and_return(domain) # always see this at the start of #call expect(ui).to receive(:info).with('Removing domain...') end context 'when no snapshots' do let(:root_disk) { double('libvirt_root_disk') } before do allow(libvirt_domain).to receive(:list_snapshots).and_return([]) allow(libvirt_domain).to receive(:has_managed_save?).and_return(nil) root_disk.stub(name: 'test.img') end context 'when only has root disk' do it 'calls fog to destroy volumes' do expect(domain).to receive(:destroy).with(destroy_volumes: true) expect(subject.call(env)).to be_nil end end context 'when has additional disks' do let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test config.vm.provider :libvirt do |libvirt| libvirt.storage :file end end EOF end 
let(:extra_disk) { double('libvirt_extra_disk') } before do extra_disk.stub(name: 'test-vdb.qcow2') end it 'destroys disks individually' do allow(libvirt_domain).to receive(:name).and_return('test') allow(domain).to receive(:volumes).and_return([extra_disk], [root_disk]) expect(domain).to receive(:destroy).with(destroy_volumes: false) expect(extra_disk).to receive(:destroy) # extra disk remove expect(root_disk).to receive(:destroy) # root disk remove expect(subject.call(env)).to be_nil end end context 'when has CDROMs attached' do let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :device => :cdrom end end EOF end it 'uses explicit removal of disks' do allow(libvirt_domain).to receive(:name).and_return('test') allow(domain).to receive(:volumes).and_return([root_disk]) expect(domain).to_not receive(:destroy).with(destroy_volumes: true) expect(root_disk).to receive(:destroy) # root disk remove expect(subject.call(env)).to be_nil end end end end end vagrant-libvirt-0.0.45/spec/unit/action/set_name_of_domain_spec.rb0000644000004100000410000000120513363570025025300 0ustar www-datawww-datarequire 'spec_helper' describe VagrantPlugins::ProviderLibvirt::Action::SetNameOfDomain do before :each do @env = EnvironmentHelper.new end it 'builds unique domain name' do @env.random_hostname = true dmn = VagrantPlugins::ProviderLibvirt::Action::SetNameOfDomain.new(Object.new, @env) first = dmn.build_domain_name(@env) second = dmn.build_domain_name(@env) first.should_not eq(second) end it 'builds simple domain name' do @env.default_prefix = 'pre' dmn = VagrantPlugins::ProviderLibvirt::Action::SetNameOfDomain.new(Object.new, @env) dmn.build_domain_name(@env).should eq('pre_') end end vagrant-libvirt-0.0.45/spec/spec_helper.rb0000644000004100000410000000023713363570025020521 0ustar www-datawww-datarequire 'coveralls' Coveralls.wear! require 'vagrant-libvirt' require 'support/environment_helper' require 'vagrant-spec/unit' RSpec.configure do |spec| end vagrant-libvirt-0.0.45/spec/support/0000755000004100000410000000000013363570025017415 5ustar www-datawww-datavagrant-libvirt-0.0.45/spec/support/sharedcontext.rb0000644000004100000410000000206413363570025022617 0ustar www-datawww-datarequire 'spec_helper' shared_context 'unit' do include_context 'vagrant-unit' let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test end EOF end let(:test_env) do test_env = isolated_environment test_env.vagrantfile vagrantfile test_env end let(:env) { { env: iso_env, machine: machine, ui: ui, root_path: '/rootpath' } } let(:conf) { Vagrant::Config::V2::DummyConfig.new } let(:ui) { Vagrant::UI::Basic.new } let(:iso_env) { test_env.create_vagrant_env ui_class: Vagrant::UI::Basic } let(:machine) { iso_env.machine(:test, :libvirt) } # Mock the communicator to prevent SSH commands for being executed. let(:communicator) { double('communicator') } # Mock the guest operating system. 
let(:guest) { double('guest') } let(:app) { ->(env) {} } let(:plugin) { register_plugin } before (:each) do machine.stub(guest: guest) machine.stub(communicator: communicator) end end vagrant-libvirt-0.0.45/spec/support/environment_helper.rb0000644000004100000410000000145513363570025023652 0ustar www-datawww-datarequire 'ostruct' require 'pathname' class EnvironmentHelper attr_writer :domain_name attr_accessor :random_hostname, :name, :default_prefix def [](value) send(value.to_sym) end def cpus 4 end def memory 1024 end %w(cpus cpu_mode loader nvram boot_order machine_type disk_bus disk_device nested volume_cache kernel cmd_line initrd graphics_type graphics_autoport graphics_port graphics_ip graphics_passwd video_type video_vram keymap storage_pool_name disks cdroms driver).each do |name| define_method(name.to_sym) do nil end end def machine self end def provider_config self end def root_path Pathname.new('./spec/support/foo') end def domain_name # noop end def libvirt_compute OpenStruct.new(servers: []) end end vagrant-libvirt-0.0.45/spec/support/libvirt_context.rb0000644000004100000410000000165513363570025023170 0ustar www-datawww-datarequire 'fog/libvirt' shared_context 'libvirt' do include_context 'unit' let(:libvirt_context) { true } let(:id) { 'dummy-vagrant_dummy' } let(:connection) { double('connection') } let(:domain) { double('domain') } def connection_result(options = {}) result = options.fetch(:result, nil) double('connection_result' => result) end before (:each) do # we don't want unit tests to ever run commands on the system; so we wire # in a double to ensure any unexpected messages raise exceptions stub_const('::Fog::Compute', connection) # drivers also call vm_exists? during init; allow(connection).to receive(:servers).with(kind_of(String)) .and_return(connection_result(result: nil)) # return some information for domain when needed allow(domain).to receive(:mac).and_return('9C:D5:53:F1:5A:E7') machine.stub(id: id) end end vagrant-libvirt-0.0.45/.gitignore0000644000004100000410000000031613363570025016737 0ustar www-datawww-data*.gem *.rbc .bundle .config .yardoc Gemfile.lock InstalledFiles _yardoc coverage doc/ lib/bundler/man pkg rdoc spec/reports test/tmp test/version_tmp tmp Vagrantfile !example_box/Vagrantfile .vagrant *.swp vagrant-libvirt-0.0.45/LICENSE0000644000004100000410000000205513363570025015756 0ustar www-datawww-dataCopyright (c) 2013 Lukas Stanek MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
vagrant-libvirt-0.0.45/Rakefile0000644000004100000410000000025613363570025016417 0ustar www-datawww-data#require 'rubygems' #require 'bundler/setup' require 'bundler/gem_tasks' Bundler::GemHelper.install_tasks task default: [:deftask] task :deftask do puts 'call rake -T' end vagrant-libvirt-0.0.45/lib/0000755000004100000410000000000013363570025015515 5ustar www-datawww-datavagrant-libvirt-0.0.45/lib/vagrant-libvirt.rb0000644000004100000410000000146613363570025021164 0ustar www-datawww-datarequire 'pathname' module VagrantPlugins module ProviderLibvirt lib_path = Pathname.new(File.expand_path('../vagrant-libvirt', __FILE__)) autoload :Action, lib_path.join('action') autoload :Errors, lib_path.join('errors') autoload :Util, lib_path.join('util') def self.source_root @source_root ||= Pathname.new(File.expand_path('../../', __FILE__)) end end end begin require 'vagrant' rescue LoadError raise 'The Vagrant Libvirt plugin must be run within Vagrant.' end # This is a sanity check to make sure no one is attempting to install # this into an early Vagrant version. if Vagrant::VERSION < '1.5.0' raise 'The Vagrant Libvirt plugin is only compatible with Vagrant 1.5+.' end # make sure base module class defined before loading plugin require 'vagrant-libvirt/plugin' vagrant-libvirt-0.0.45/lib/vagrant-libvirt/0000755000004100000410000000000013363570025020630 5ustar www-datawww-datavagrant-libvirt-0.0.45/lib/vagrant-libvirt/driver.rb0000644000004100000410000001045713363570025022457 0ustar www-datawww-datarequire 'fog/libvirt' require 'libvirt' require 'log4r' module VagrantPlugins module ProviderLibvirt class Driver # store the connection at the process level # # possibly this should be a connection pool using the connection # settings as a key to allow per machine connection attributes # to be used. @@connection = nil @@system_connection = nil def initialize(machine) @logger = Log4r::Logger.new('vagrant_libvirt::driver') @machine = machine end def connection # If already connected to libvirt, just use it and don't connect # again. return @@connection if @@connection # Get config options for libvirt provider. config = @machine.provider_config uri = config.uri conn_attr = {} conn_attr[:provider] = 'libvirt' conn_attr[:libvirt_uri] = uri conn_attr[:libvirt_username] = config.username if config.username conn_attr[:libvirt_password] = config.password if config.password # Setup command for retrieving IP address for newly created machine # with some MAC address. Get it from dnsmasq leases table ip_command = %q( awk "/$mac/ {print \$1}" /proc/net/arp ) conn_attr[:libvirt_ip_command] = ip_command @logger.info("Connecting to Libvirt (#{uri}) ...") begin @@connection = Fog::Compute.new(conn_attr) rescue Fog::Errors::Error => e raise Errors::FogLibvirtConnectionError, error_message: e.message end @@connection end def system_connection # If already connected to libvirt, just use it and don't connect # again. return @@system_connection if @@system_connection config = @machine.provider_config @@system_connection = Libvirt::open_read_only(config.system_uri) @@system_connection end def get_domain(mid) begin domain = connection.servers.get(mid) rescue Libvirt::RetrieveError => e if e.libvirt_code == ProviderLibvirt::Util::ErrorCodes::VIR_ERR_NO_DOMAIN @logger.debug("machine #{mid} not found #{e}.") return nil else raise e end end domain end def created?(mid) domain = get_domain(mid) !domain.nil? 
end def get_ipaddress(machine) # Find the machine domain = get_domain(machine.id) if @machine.provider_config.qemu_use_session return get_ipaddress_system domain.mac end if domain.nil? # The machine can't be found return nil end # Get IP address from arp table ip_address = nil begin domain.wait_for(2) do addresses.each_pair do |_type, ip| # Multiple leases are separated with a newline, return only # the most recent address ip_address = ip[0].split("\n").first unless ip[0].nil? end !ip_address.nil? end rescue Fog::Errors::TimeoutError @logger.info('Timeout at waiting for an ip address for machine %s' % machine.name) end unless ip_address @logger.info('No arp table entry found for machine %s' % machine.name) return nil end ip_address end def get_ipaddress_system(mac) ip_address = nil system_connection.list_all_networks.each do |net| leases = net.dhcp_leases(mac, 0) # Assume the lease expiring last is the current IP address ip_address = leases.sort_by { |lse| lse["expirytime"] }.last["ipaddr"] if !leases.empty? break if ip_address end return ip_address end def state(machine) # may be other error states with initial retreival we can't handle begin domain = get_domain(machine.id) rescue Libvirt::RetrieveError => e @logger.debug("Machine #{machine.id} not found #{e}.") return :not_created end # TODO: terminated no longer appears to be a valid fog state, remove? return :not_created if domain.nil? || domain.state.to_sym == :terminated domain.state.tr('-', '_').to_sym end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/version.rb0000644000004100000410000000012713363570025022642 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt VERSION = '0.0.45'.freeze end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/provider.rb0000644000004100000410000001062213363570025023010 0ustar www-datawww-datarequire 'vagrant' module VagrantPlugins module ProviderLibvirt autoload :Driver, 'vagrant-libvirt/driver' # This is the base class for a provider for the V2 API. A provider # is responsible for creating compute resources to match the # needs of a Vagrant-configured system. class Provider < Vagrant.plugin('2', :provider) def initialize(machine) @machine = machine raise 'REQUIRE USE RUBY >= 1.9.3 VERSION' if RUBY_VERSION < '1.9.3' end # This should return an action callable for the given name. def action(name) # Attempt to get the action method from the Action class if it # exists, otherwise return nil to show that we don't support the # given action. action_method = "action_#{name}" return Action.send(action_method) if Action.respond_to?(action_method) nil end def driver return @driver if @driver @driver = Driver.new(@machine) end # This method is called if the underying machine ID changes. Providers # can use this method to load in new data for the actual backing # machine or to realize that the machine is now gone (the ID can # become `nil`). def machine_id_changed; end # This should return a hash of information that explains how to # SSH into the machine. If the machine is not at a point where # SSH is even possible, then `nil` should be returned. def ssh_info # Return the ssh_info if already retrieved otherwise call the driver # and save the result. # # Ssh info has following format.. # # { # :host => "1.2.3.4", # :port => "22", # :username => "mitchellh", # :private_key_path => "/path/to/my/key" # } # note that modifing @machine.id or accessing @machine.state is not # thread safe, so be careful to avoid these here as this method may # be called from other threads of execution. 
return nil if state.id != :running ip = driver.get_ipaddress(@machine) # if can't determine the IP, just return nil and let the core # deal with it, similar to the docker provider return nil unless ip ssh_info = { host: ip, port: @machine.config.ssh.guest_port, forward_agent: @machine.config.ssh.forward_agent, forward_x11: @machine.config.ssh.forward_x11 } if @machine.provider_config.connect_via_ssh ssh_info[:proxy_command] = "ssh '#{@machine.provider_config.host}' " \ "-l '#{@machine.provider_config.username}' " \ "-i '#{@machine.provider_config.id_ssh_key_file}' " \ 'nc %h %p' end ssh_info end def mac_addresses # Run a custom action called "read_mac_addresses" which will return # a list of mac addresses used by the machine. The returned data will # be in the following format: # # { # : # } env = @machine.action('read_mac_addresses') env[:machine_mac_addresses] end # This should return the state of the machine within this provider. # The state must be an instance of {MachineState}. def state state_id = nil state_id = :not_created unless @machine.id state_id = :not_created if !state_id && (!@machine.id || !driver.created?(@machine.id)) # Query the driver for the current state of the machine state_id = driver.state(@machine) if @machine.id && !state_id state_id = :unknown unless state_id # This is a special pseudo-state so that we don't set the # NOT_CREATED_ID while we're setting up the machine. This avoids # clearing the data dir. state_id = :preparing if @machine.id == 'preparing' # Get the short and long description short = state_id.to_s.tr('_', ' ') long = I18n.t("vagrant_libvirt.states.#{state_id}") # If we're not created, then specify the special ID flag if state_id == :not_created state_id = Vagrant::MachineState::NOT_CREATED_ID end # Return the MachineState object Vagrant::MachineState.new(state_id, short, long) end def to_s id = @machine.id.nil? ? 'new' : @machine.id "Libvirt (#{id})" end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/plugin.rb0000644000004100000410000000541013363570025022453 0ustar www-datawww-databegin require 'vagrant' rescue LoadError raise 'The Vagrant Libvirt plugin must be run within Vagrant.' end # compatibility fix to define constant not available vagrant <1.6 ::Vagrant::MachineState::NOT_CREATED_ID ||= :not_created module VagrantPlugins module ProviderLibvirt class Plugin < Vagrant.plugin('2') name 'libvirt' description <<-DESC Vagrant plugin to manage VMs in libvirt. DESC config('libvirt', :provider) do require_relative 'config' Config end provider('libvirt', parallel: true, box_optional: true) do require_relative 'provider' Provider end action_hook(:remove_libvirt_image) do |hook| hook.after Vagrant::Action::Builtin::BoxRemove, Action.remove_libvirt_image end guest_capability('linux', 'mount_p9_shared_folder') do require_relative 'cap/mount_p9' Cap::MountP9 end provider_capability(:libvirt, :nic_mac_addresses) do require_relative 'cap/nic_mac_addresses' Cap::NicMacAddresses end # lower priority than nfs or rsync # https://github.com/vagrant-libvirt/vagrant-libvirt/pull/170 synced_folder('9p', 4) do require_relative 'cap/synced_folder' VagrantPlugins::SyncedFolder9p::SyncedFolder end # This initializes the internationalization strings. def self.setup_i18n I18n.load_path << File.expand_path('locales/en.yml', ProviderLibvirt.source_root) I18n.reload! end # This sets up our log level to be whatever VAGRANT_LOG is. 
def self.setup_logging require 'log4r' level = nil begin level = Log4r.const_get(ENV['VAGRANT_LOG'].upcase) rescue NameError # This means that the logging constant wasn't found, # which is fine. We just keep `level` as `nil`. But # we tell the user. level = nil end # Some constants, such as "true" resolve to booleans, so the # above error checking doesn't catch it. This will check to make # sure that the log level is an integer, as Log4r requires. level = nil unless level.is_a?(Integer) # Set the logging level on all "vagrant" namespaced # logs as long as we have a valid level. if level logger = Log4r::Logger.new('vagrant_libvirt') logger.outputters = Log4r::Outputter.stderr logger.level = level logger = nil end end # Setup logging and i18n before any autoloading loads other classes # with logging configured as this prevents inheritance of the log level # from the parent logger. setup_logging setup_i18n end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/templates/0000755000004100000410000000000013363570025022626 5ustar www-datawww-datavagrant-libvirt-0.0.45/lib/vagrant-libvirt/templates/default_storage_volume.xml.erb0000644000004100000410000000100613363570025030653 0ustar www-datawww-data <%= @name %> <%= split_size_unit(@allocation)[0] %> <%= split_size_unit(@capacity)[0] %> <%= @storage_volume_uid %> <%= @storage_volume_gid %> 0744 vagrant-libvirt-0.0.45/lib/vagrant-libvirt/templates/private_network.xml.erb0000644000004100000410000000264413363570025027350 0ustar www-datawww-data <%= @network_name %> <% if @network_domain_name %> <% end %> <% if @network_mtu %> <% end %> <% if (@network_forward_mode != 'none' && @network_forward_mode != 'veryisolated') %> <% if @network_forward_device %> <% else %> <% end %> <% end %> <% if @network_forward_mode != 'veryisolated' %> <% if @network_dhcp_enabled %> <% if @network_dhcp_bootp_file %> <% if @network_dhcp_bootp_server %> <% else %> <% end %> <% end %> <% end %> <% end %> <% if !@network_ipv6_address.nil? && !@network_ipv6_prefix.nil? %> <% end %> vagrant-libvirt-0.0.45/lib/vagrant-libvirt/templates/default_storage_pool.xml.erb0000644000004100000410000000044313363570025030321 0ustar www-datawww-data default <%= @storage_pool_path %> 0755 <%= @storage_pool_uid %> <%= @storage_pool_gid %> vagrant-libvirt-0.0.45/lib/vagrant-libvirt/templates/domain.xml.erb0000644000004100000410000002073613363570025025376 0ustar www-datawww-data <%= @name %> <%= @uuid %> <%= @memory_size %> <%= @cpus %> <% if @cpu_mode != 'host-passthrough' %> <% if @cpu_mode == 'custom' %><%= @cpu_model %><% end %> <% if @nested %> <% if @cpu_features.select{|x| x[:name] == 'vmx'}.empty? %> <% end %> <% if @cpu_features.select{|x| x[:name] == 'svm'}.empty? %> <% end %> <% end %> <% @cpu_features.each do |cpu_feature| %> <% end %> <% unless @cpu_topology.empty? %> <%# CPU topology -%> <% end %> <% end %> <% if @numa_nodes %> <% @numa_nodes.each_with_index do |node, index| %> <% end %> <% end %> <% unless @memory_backing.empty? 
%> <% @memory_backing.each do |backing| %> <<%= backing[:name] %> <%= backing[:config].map { |k,v| "#{k}='#{v}'"}.join(' ') %>/> <% end %> <% end%> <% if @machine_type %> <% if @machine_arch %> hvm <% else %> hvm <% end %> <% else %> <% if @machine_arch %> hvm <% else %> hvm <% end %> <% end %> <% if @loader %> <% if @nvram %> <%= @loader %> <% else %> <%= @loader %> <% end %> <% end %> <% if @nvram %> <%= @nvram %> <% end %> <% if @boot_order.count >= 1 %> <% end %> <%= @kernel %> <%= @initrd %> <%= @cmd_line %> <% if @dtb %> <%= @dtb %> <% end %> <% @features.each do |feature| %> <<%= feature %>/> <% end %> <% if @kvm_hidden %> <% end %> <% if !@features_hyperv.empty? %> <% @features_hyperv.each do |feature| %> <<%= feature[:name] %> state='<%= feature[:state] %>' /> <% end %> <% end %> <% if @emulator_path %> <%= @emulator_path %> <% end %> <% if @domain_volume_path %> <%# we need to ensure a unique target dev -%> <% end %> <%# additional disks -%> <% @disks.each do |d| -%> <% if d[:shareable] %> <% end %> <% if d[:serial] %> <%= d[:serial] %> <% end %> <%# this will get auto generated by libvirt
-%> <% end -%> <% @cdroms.each do |c| %> <% end %> <% @channels.each do |channel| %> <%if channel[:source_mode] or channel[:source_path] %> mode='<%= channel[:source_mode] %>' <% end %> <% if channel[:source_path] %> path="<%= channel[:source_path] %>" <% end %> /> <% end %> name="<%= channel[:target_name] %>" <% end %> <% if channel[:target_address] %> address="<%= channel[:target_address] %>" <% end %> <% if channel[:target_port] %> port="<%= channel[:target_port] %>" <% end %> /> <% end %> <% @inputs.each do |input| %> <% end %> <% if !@sound_type.nil? %> <%# Sound device-%> <%# End Sound%> <% end %> <% if @graphics_type != 'none' %> <%# Video device -%> /> <%#End Video -%> <% end %> <% if @rng[:model] == "random"%> /dev/random <% end %> <% @pcis.each do |pci| %>
<% end %> <% @usbs.each do |usb| %> <% if usb[:vendor] %> <% end %> <% if usb[:product] %> <% end %> <% if usb[:bus] && usb[:device] %>
<% end %> <% end %> <% unless @redirdevs.empty? %> <% @redirdevs.each do |redirdev| %> <% end %> <% unless @redirfilters.empty? %> <% @redirfilters.each do |usbdev| %> <% end %> <% end %> <% end %> <% unless @watchdog_dev.empty? %> <%# Watchdog Device -%> <% end %> <% unless @smartcard_dev.empty? -%> <% if @smartcard_dev[:mode] == 'passthrough' %> <% if @smartcard_dev[:type] == 'tcp' %> <% else %> <% end %> <% end %> <% end -%> <% if @tpm_path -%> <%# TPM Device -%> <% end -%> <% if not @usbctl_dev.empty? %> <%# USB Controller -%> /> <% end %> <% unless @qargs.empty? %> <% @qargs.each do |arg| %> <% end %> <% end %> vagrant-libvirt-0.0.45/lib/vagrant-libvirt/templates/public_interface.xml.erb0000644000004100000410000000156313363570025027422 0ustar www-datawww-data trustGuestRxFilters='yes'<% end %>> <% if @mac %> <% end %> <%if @type == 'direct'%> <% elsif !@portgroup.nil? %> <% else %> <% end %> <% if @driver_name and @driver_queues %> <% elsif @driver_queues %> <% elsif @driver_name %> <% end %> <% if @ovs %> <% end %> <% if @pci_bus and @pci_slot %>
<% end %> vagrant-libvirt-0.0.45/lib/vagrant-libvirt/errors.rb0000644000004100000410000001002713363570025022471 0ustar www-datawww-datarequire 'vagrant' module VagrantPlugins module ProviderLibvirt module Errors class VagrantLibvirtError < Vagrant::Errors::VagrantError error_namespace('vagrant_libvirt.errors') end # package not supported class PackageNotSupported < VagrantLibvirtError error_key(:package_not_supported) end # Storage pools and volumes exceptions class NoStoragePool < VagrantLibvirtError error_key(:no_storage_pool) end class DomainVolumeExists < VagrantLibvirtError error_key(:domain_volume_exists) end class NoDomainVolume < VagrantLibvirtError error_key(:no_domain_volume) end class CreatingStoragePoolError < VagrantLibvirtError error_key(:creating_storage_pool_error) end class CreatingVolumeError < VagrantLibvirtError error_key(:creating_volume_error) end class ImageUploadError < VagrantLibvirtError error_key(:image_upload_error) end # Box exceptions class NoBoxVolume < VagrantLibvirtError error_key(:no_box_volume) end class NoBoxVirtualSizeSet < VagrantLibvirtError error_key(:no_box_virtual_size) end class NoBoxFormatSet < VagrantLibvirtError error_key(:no_box_format) end class WrongBoxFormatSet < VagrantLibvirtError error_key(:wrong_box_format) end # Fog libvirt exceptions class FogError < VagrantLibvirtError error_key(:fog_error) end class FogLibvirtConnectionError < VagrantLibvirtError error_key(:fog_libvirt_connection_error) end class FogCreateVolumeError < VagrantLibvirtError error_key(:fog_create_volume_error) end class FogCreateDomainVolumeError < VagrantLibvirtError error_key(:fog_create_domain_volume_error) end class FogCreateServerError < VagrantLibvirtError error_key(:fog_create_server_error) end # Network exceptions class ManagementNetworkError < VagrantLibvirtError error_key(:management_network_error) end class NetworkNameAndAddressMismatch < VagrantLibvirtError error_key(:network_name_and_address_mismatch) end class DHCPMismatch < VagrantLibvirtError error_key(:dhcp_mismatch) end class CreateNetworkError < VagrantLibvirtError error_key(:create_network_error) end class DestroyNetworkError < VagrantLibvirtError error_key(:destroy_network_error) end class NetworkNotAvailableError < VagrantLibvirtError error_key(:network_not_available_error) end class AutostartNetworkError < VagrantLibvirtError error_key(:autostart_network_error) end class ActivateNetworkError < VagrantLibvirtError error_key(:activate_network_error) end class TunnelPortNotDefined < VagrantLibvirtError error_key(:tunnel_port_not_defined) end class ManagementNetworkRequired < VagrantLibvirtError error_key(:management_network_required) end # Other exceptions class InterfaceSlotNotAvailable < VagrantLibvirtError error_key(:interface_slot_not_available) end class InterfaceSlotExhausted < VagrantLibvirtError error_key(:interface_slot_exhausted) end class RsyncError < VagrantLibvirtError error_key(:rsync_error) end class DomainNameExists < VagrantLibvirtError error_key(:domain_name_exists) end class NoDomainError < VagrantLibvirtError error_key(:no_domain_error) end class AttachDeviceError < VagrantLibvirtError error_key(:attach_device_error) end class DetachDeviceError < VagrantLibvirtError error_key(:detach_device_error) end class NoIpAddressError < VagrantLibvirtError error_key(:no_ip_address_error) end class DeleteSnapshotError < VagrantLibvirtError error_key(:delete_snapshot_error) end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/util/0000755000004100000410000000000013363570025021605 5ustar 
www-datawww-datavagrant-libvirt-0.0.45/lib/vagrant-libvirt/util/network_util.rb0000644000004100000410000001523213363570025024663 0ustar www-datawww-datarequire 'nokogiri' require 'vagrant/util/network_ip' module VagrantPlugins module ProviderLibvirt module Util module NetworkUtil include Vagrant::Util::NetworkIP def configured_networks(env, logger) qemu_use_session = env[:machine].provider_config.qemu_use_session management_network_device = env[:machine].provider_config.management_network_device management_network_name = env[:machine].provider_config.management_network_name management_network_address = env[:machine].provider_config.management_network_address management_network_mode = env[:machine].provider_config.management_network_mode management_network_mac = env[:machine].provider_config.management_network_mac management_network_guest_ipv6 = env[:machine].provider_config.management_network_guest_ipv6 management_network_autostart = env[:machine].provider_config.management_network_autostart management_network_pci_bus = env[:machine].provider_config.management_network_pci_bus management_network_pci_slot = env[:machine].provider_config.management_network_pci_slot logger.info "Using #{management_network_name} at #{management_network_address} as the management network #{management_network_mode} is the mode" begin management_network_ip = IPAddr.new(management_network_address) rescue ArgumentError raise Errors::ManagementNetworkError, error_message: "#{management_network_address} is not a valid IP address" end # capture address into $1 and mask into $2 management_network_ip.inspect =~ /IPv4:(.*)\/(.*)>/ if Regexp.last_match(2) == '255.255.255.255' raise Errors::ManagementNetworkError, error_message: "#{management_network_address} does not include both an address and subnet mask" end if qemu_use_session management_network_options = { iface_type: :public_network, dev: management_network_device, mode: 'bridge', type: 'bridge', bus: management_network_pci_bus, slot: management_network_pci_slot } else management_network_options = { iface_type: :private_network, network_name: management_network_name, ip: Regexp.last_match(1), netmask: Regexp.last_match(2), dhcp_enabled: true, forward_mode: management_network_mode, guest_ipv6: management_network_guest_ipv6, autostart: management_network_autostart, bus: management_network_pci_bus, slot: management_network_pci_slot } end unless management_network_mac.nil? management_network_options[:mac] = management_network_mac end unless management_network_pci_bus.nil? and management_network_pci_slot.nil? management_network_options[:bus] = management_network_pci_bus management_network_options[:slot] = management_network_pci_slot end if (env[:machine].config.vm.box && !env[:machine].provider_config.mgmt_attach) raise Errors::ManagementNetworkRequired end # add management network to list of networks to check # unless mgmt_attach set to false networks = if env[:machine].provider_config.mgmt_attach [management_network_options] else [] end env[:machine].config.vm.networks.each do |type, original_options| logger.debug "In config found network type #{type} options #{original_options}" # Options can be specified in Vagrantfile in short format (:ip => ...), # or provider format # (:libvirt__network_name => ...). 
# https://github.com/mitchellh/vagrant/blob/master/lib/vagrant/util/scoped_hash_override.rb options = scoped_hash_override(original_options, :libvirt) # store type in options # use default values if not already set options = { iface_type: type, netmask: '255.255.255.0', dhcp_enabled: true, forward_mode: 'nat' }.merge(options) if options[:type].to_s == 'dhcp' && options[:ip].nil? options[:network_name] = 'vagrant-private-dhcp' end # add to list of networks to check networks.push(options) end networks end # Return a list of all (active and inactive) libvirt networks as a list # of hashes with their name, network address and status (active or not) def libvirt_networks(libvirt_client) libvirt_networks = [] active = libvirt_client.list_networks inactive = libvirt_client.list_defined_networks # Iterate over all (active and inactive) networks. active.concat(inactive).each do |network_name| libvirt_network = libvirt_client.lookup_network_by_name( network_name ) # Parse ip address and netmask from the network xml description. xml = Nokogiri::XML(libvirt_network.xml_desc) ip = xml.xpath('/network/ip/@address').first ip = ip.value if ip netmask = xml.xpath('/network/ip/@netmask').first netmask = netmask.value if netmask dhcp_enabled = if xml.at_xpath('//network/ip/dhcp') true else false end domain_name = xml.at_xpath('/network/domain/@name') domain_name = domain_name.value if domain_name # Calculate network address of network from ip address and # netmask. network_address = (network_address(ip, netmask) if ip && netmask) libvirt_networks << { name: network_name, ip_address: ip, netmask: netmask, network_address: network_address, dhcp_enabled: dhcp_enabled, bridge_name: libvirt_network.bridge_name, domain_name: domain_name, created: true, active: libvirt_network.active?, autostart: libvirt_network.autostart?, libvirt_network: libvirt_network } end libvirt_networks end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/util/timer.rb0000644000004100000410000000055713363570025023261 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Util class Timer # A basic utility method that times the execution of the given # block and returns it. def self.time start_time = Time.now.to_f yield end_time = Time.now.to_f end_time - start_time end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/util/erb_template.rb0000644000004100000410000000151013363570025024572 0ustar www-datawww-datarequire 'erubis' module VagrantPlugins module ProviderLibvirt module Util module ErbTemplate # TODO: remove and use nokogiri builder # TODO: might be a chance to use vagrant template system according to https://github.com/mitchellh/vagrant/issues/3231 def to_xml(template_name = nil, data = binding) erb = template_name || self.class.to_s.split('::').last.downcase path = File.join(File.dirname(__FILE__), '..', 'templates', "#{erb}.xml.erb") template = File.read(path) # TODO: according to erubis documentation, we should rather use evaluate and forget about # binding since the template may then change variables values Erubis::Eruby.new(template, trim: true).result(data) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/util/storage_util.rb0000644000004100000410000000131013363570025024626 0ustar www-datawww-data module VagrantPlugins module ProviderLibvirt module Util module StorageUtil def storage_uid(env) env[:machine].provider_config.qemu_use_session ? Process.uid : 0 end def storage_gid(env) env[:machine].provider_config.qemu_use_session ? 
Process.gid : 0 end def storage_pool_path(env) if env[:machine].provider_config.storage_pool_path env[:machine].provider_config.storage_pool_path elsif env[:machine].provider_config.qemu_use_session File.expand_path('~/.local/share/libvirt/images') else '/var/lib/libvirt/images' end end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/util/error_codes.rb0000644000004100000410000001415013363570025024441 0ustar www-datawww-data# Ripped from http://libvirt.org/html/libvirt-virterror.html#virErrorNumber. module VagrantPlugins module ProviderLibvirt module Util module ErrorCodes VIR_ERR_OK = 0 VIR_ERR_INTERNAL_ERROR = 1 # internal error VIR_ERR_NO_MEMORY = 2 # memory allocation failure VIR_ERR_NO_SUPPORT = 3 # no support for this function VIR_ERR_UNKNOWN_HOST = 4 # could not resolve hostname VIR_ERR_NO_CONNECT = 5 # can't connect to hypervisor VIR_ERR_INVALID_CONN = 6 # invalid connection object VIR_ERR_INVALID_DOMAIN = 7 # invalid domain object VIR_ERR_INVALID_ARG = 8 # invalid function argument VIR_ERR_OPERATION_FAILED = 9 # a command to hypervisor failed VIR_ERR_GET_FAILED = 10 # a HTTP GET command to failed VIR_ERR_POST_FAILED = 11 # a HTTP POST command to failed VIR_ERR_HTTP_ERROR = 12 # unexpected HTTP error code VIR_ERR_SEXPR_SERIAL = 13 # failure to serialize an S-Expr VIR_ERR_NO_XEN = 14 # could not open Xen hypervisor control VIR_ERR_XEN_CALL = 15 # failure doing an hypervisor call VIR_ERR_OS_TYPE = 16 # unknown OS type VIR_ERR_NO_KERNEL = 17 # missing kernel information VIR_ERR_NO_ROOT = 18 # missing root device information VIR_ERR_NO_SOURCE = 19 # missing source device information VIR_ERR_NO_TARGET = 20 # missing target device information VIR_ERR_NO_NAME = 21 # missing domain name information VIR_ERR_NO_OS = 22 # missing domain OS information VIR_ERR_NO_DEVICE = 23 # missing domain devices information VIR_ERR_NO_XENSTORE = 24 # could not open Xen Store control VIR_ERR_DRIVER_FULL = 25 # too many drivers registered VIR_ERR_CALL_FAILED = 26 # not supported by the drivers (DEPRECATED) VIR_ERR_XML_ERROR = 27 # an XML description is not well formed or broken VIR_ERR_DOM_EXIST = 28 # the domain already exist VIR_ERR_OPERATION_DENIED = 29 # operation forbidden on read-only connections VIR_ERR_OPEN_FAILED = 30 # failed to open a conf file VIR_ERR_READ_FAILED = 31 # failed to read a conf file VIR_ERR_PARSE_FAILED = 32 # failed to parse a conf file VIR_ERR_CONF_SYNTAX = 33 # failed to parse the syntax of a conf file VIR_ERR_WRITE_FAILED = 34 # failed to write a conf file VIR_ERR_XML_DETAIL = 35 # detail of an XML error VIR_ERR_INVALID_NETWORK = 36 # invalid network object VIR_ERR_NETWORK_EXIST = 37 # the network already exist VIR_ERR_SYSTEM_ERROR = 38 # general system call failure VIR_ERR_RPC = 39 # some sort of RPC error VIR_ERR_GNUTLS_ERROR = 40 # error from a GNUTLS call VIR_WAR_NO_NETWORK = 41 # failed to start network VIR_ERR_NO_DOMAIN = 42 # domain not found or unexpectedly disappeared VIR_ERR_NO_NETWORK = 43 # network not found VIR_ERR_INVALID_MAC = 44 # invalid MAC address VIR_ERR_AUTH_FAILED = 45 # authentication failed VIR_ERR_INVALID_STORAGE_POOL = 46 # invalid storage pool object VIR_ERR_INVALID_STORAGE_VOL = 47 # invalid storage vol object VIR_WAR_NO_STORAGE = 48 # failed to start storage VIR_ERR_NO_STORAGE_POOL = 49 # storage pool not found VIR_ERR_NO_STORAGE_VOL = 50 # storage volume not found VIR_WAR_NO_NODE = 51 # failed to start node driver VIR_ERR_INVALID_NODE_DEVICE = 52 # invalid node device object VIR_ERR_NO_NODE_DEVICE = 53 # node device not found 
VIR_ERR_NO_SECURITY_MODEL = 54 # security model not found VIR_ERR_OPERATION_INVALID = 55 # operation is not applicable at this time VIR_WAR_NO_INTERFACE = 56 # failed to start interface driver VIR_ERR_NO_INTERFACE = 57 # interface driver not running VIR_ERR_INVALID_INTERFACE = 58 # invalid interface object VIR_ERR_MULTIPLE_INTERFACES = 59 # more than one matching interface found VIR_WAR_NO_NWFILTER = 60 # failed to start nwfilter driver VIR_ERR_INVALID_NWFILTER = 61 # invalid nwfilter object VIR_ERR_NO_NWFILTER = 62 # nw filter pool not found VIR_ERR_BUILD_FIREWALL = 63 # nw filter pool not found VIR_WAR_NO_SECRET = 64 # failed to start secret storage VIR_ERR_INVALID_SECRET = 65 # invalid secret VIR_ERR_NO_SECRET = 66 # secret not found VIR_ERR_CONFIG_UNSUPPORTED = 67 # unsupported configuration construct VIR_ERR_OPERATION_TIMEOUT = 68 # timeout occurred during operation VIR_ERR_MIGRATE_PERSIST_FAILED = 69 # a migration worked, but making the VM persist on the dest host failed VIR_ERR_HOOK_SCRIPT_FAILED = 70 # a synchronous hook script failed VIR_ERR_INVALID_DOMAIN_SNAPSHOT = 71 # invalid domain snapshot VIR_ERR_NO_DOMAIN_SNAPSHOT = 72 # domain snapshot not found VIR_ERR_INVALID_STREAM = 73 # stream pointer not valid VIR_ERR_ARGUMENT_UNSUPPORTED = 74 # valid API use but unsupported by the given driver VIR_ERR_STORAGE_PROBE_FAILED = 75 # storage pool probe failed VIR_ERR_STORAGE_POOL_BUILT = 76 # storage pool already built VIR_ERR_SNAPSHOT_REVERT_RISKY = 77 # force was not requested for a risky domain snapshot revert VIR_ERR_OPERATION_ABORTED = 78 # operation on a domain was canceled/aborted by user VIR_ERR_AUTH_CANCELLED = 79 # authentication cancelled VIR_ERR_NO_DOMAIN_METADATA = 80 # The metadata is not present VIR_ERR_MIGRATE_UNSAFE = 81 # Migration is not safe VIR_ERR_OVERFLOW = 82 # integer overflow VIR_ERR_BLOCK_COPY_ACTIVE = 83 # action prevented by block copy job VIR_ERR_OPERATION_UNSUPPORTED = 84 # The requested operation is not supported VIR_ERR_SSH = 85 # error in ssh transport driver VIR_ERR_AGENT_UNRESPONSIVE = 86 # guest agent is unresponsive, not running or not usable VIR_ERR_RESOURCE_BUSY = 87 # resource is already in use VIR_ERR_ACCESS_DENIED = 88 # operation on the object/resource was denied VIR_ERR_DBUS_SERVICE = 89 # error from a dbus service VIR_ERR_STORAGE_VOL_EXIST = 90 # the storage vol already exists end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/util/collection.rb0000644000004100000410000000100413363570025024260 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Util module Collection # This method finds a matching _thing_ in a collection of # _things_. This works matching if the ID or NAME equals to # `name`. Or, if `name` is a regexp, a partial match is chosen # as well. 
def self.find_matching(collection, name) collection.each do |single| return single if single.name == name end nil end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/cap/0000755000004100000410000000000013363570025021373 5ustar www-datawww-datavagrant-libvirt-0.0.45/lib/vagrant-libvirt/cap/nic_mac_addresses.rb0000644000004100000410000000075013363570025025350 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Cap class NicMacAddresses def self.nic_mac_addresses(machine) # Vagrant expects a Hash with an index starting at 1 as key # and the mac as uppercase string without colons as value nic_macs = {} machine.provider.mac_addresses.each do |index, mac| nic_macs[index + 1] = mac.upcase.delete(':') end nic_macs end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/cap/mount_p9.rb0000644000004100000410000000275013363570025023476 0ustar www-datawww-datarequire 'digest/md5' require 'vagrant/util/retryable' module VagrantPlugins module ProviderLibvirt module Cap class MountP9 extend Vagrant::Util::Retryable def self.mount_p9_shared_folder(machine, folders) folders.each do |_name, opts| # Expand the guest path so we can handle things like "~/vagrant" expanded_guest_path = machine.guest.capability( :shell_expand_guest_path, opts[:guestpath] ) # Do the actual creating and mounting machine.communicate.sudo("mkdir -p #{expanded_guest_path}") # Mount mount_tag = Digest::MD5.new.update(opts[:hostpath]).to_s[0, 31] mount_opts = '-o trans=virtio' mount_opts += ",access=#{opts[:owner]}" if opts[:owner] mount_opts += ",version=#{opts[:version]}" if opts[:version] mount_opts += ",#{opts[:mount_opts]}" if opts[:mount_opts] mount_command = "mount -t 9p #{mount_opts} '#{mount_tag}' #{expanded_guest_path}" retryable(on: Vagrant::Errors::LinuxMountFailed, tries: 5, sleep: 3) do machine.communicate.sudo('modprobe 9p') machine.communicate.sudo('modprobe 9pnet_virtio') machine.communicate.sudo(mount_command, error_class: Vagrant::Errors::LinuxMountFailed) end end end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/cap/synced_folder.rb0000644000004100000410000001040513363570025024540 0ustar www-datawww-datarequire 'log4r' require 'ostruct' require 'nokogiri' require 'digest/md5' require 'vagrant/util/subprocess' require 'vagrant/errors' require 'vagrant-libvirt/errors' # require_relative "helper" module VagrantPlugins module SyncedFolder9p class SyncedFolder < Vagrant.plugin('2', :synced_folder) include Vagrant::Util include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate def initialize(*args) super @logger = Log4r::Logger.new('vagrant_libvirt::synced_folders::9p') end def usable?(machine, _raise_error = false) # bail now if not using libvirt since checking version would throw error return false unless machine.provider_name == :libvirt # support in device attach/detach introduced in 1.2.2 # version number format is major * 1,000,000 + minor * 1,000 + release libvirt_version = machine.provider.driver.connection.client.libversion libvirt_version >= 1_002_002 end def prepare(machine, folders, _opts) raise Vagrant::Errors::Error('No libvirt connection') if machine.provider.driver.connection.nil? 
@conn = machine.provider.driver.connection.client begin # loop through folders folders.each do |id, folder_opts| folder_opts.merge!(target: id, accessmode: 'passthrough', mount: true, readonly: nil) { |_k, ov, _nv| ov } mount_tag = Digest::MD5.new.update(folder_opts[:hostpath]).to_s[0, 31] folder_opts[:mount_tag] = mount_tag machine.ui.info "================\nMachine id: #{machine.id}\nShould be mounting folders\n #{id}, opts: #{folder_opts}" #xml = to_xml('filesystem', folder_opts) xml = Nokogiri::XML::Builder.new do |xml| xml.filesystem(type: 'mount', accessmode: folder_opts[:accessmode]) do xml.driver(type: 'path', wrpolicy: 'immediate') xml.source(dir: folder_opts[:hostpath]) xml.target(dir: mount_tag) xml.readonly unless folder_opts[:readonly].nil? end end.to_xml( save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION | Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS | Nokogiri::XML::Node::SaveOptions::FORMAT ) # puts "<<<<< XML:\n #{xml}\n >>>>>" @conn.lookup_domain_by_uuid(machine.id).attach_device(xml, 0) end rescue => e machine.ui.error("could not attach device because: #{e}") raise VagrantPlugins::ProviderLibvirt::Errors::AttachDeviceError, error_message: e.message end end # TODO: once up, mount folders def enable(machine, folders, _opts) # Go through each folder and mount machine.ui.info('mounting p9 share in guest') # Only mount folders that have a guest path specified. mount_folders = {} folders.each do |id, opts| next unless opts[:mount] && opts[:guestpath] && !opts[:guestpath].empty? mount_folders[id] = opts.dup # merge common options if not given mount_folders[id].merge!(version: '9p2000.L') { |_k, ov, _nv| ov } end # Mount the actual folder machine.guest.capability( :mount_p9_shared_folder, mount_folders ) end def cleanup(machine, _opts) if machine.provider.driver.connection.nil? raise Vagrant::Errors::Error('No libvirt connection') end @conn = machine.provider.driver.connection.client begin if machine.id && machine.id != '' dom = @conn.lookup_domain_by_uuid(machine.id) Nokogiri::XML(dom.xml_desc).xpath( '/domain/devices/filesystem' ).each do |xml| dom.detach_device(xml.to_s) machine.ui.info 'Cleaned up shared folders' end end rescue => e machine.ui.error("could not detach device because: #{e}") raise VagrantPlugins::ProviderLibvirt::Errors::DetachDeviceError, error_message: e.message end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/0000755000004100000410000000000013363570025022105 5ustar www-datawww-datavagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/create_domain.rb0000644000004100000410000003165413363570025025235 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action class CreateDomain include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::create_domain') @app = app end def _disk_name(name, disk) "#{name}-#{disk[:device]}.#{disk[:type]}" # disk name end def _disks_print(disks) disks.collect do |x| "#{x[:device]}(#{x[:type]},#{x[:size]})" end.join(', ') end def _cdroms_print(cdroms) cdroms.collect { |x| x[:dev] }.join(', ') end def call(env) # Get config. 
config = env[:machine].provider_config # Gather some info about domain @name = env[:domain_name] @uuid = config.uuid @cpus = config.cpus.to_i @cpu_features = config.cpu_features @cpu_topology = config.cpu_topology @features = config.features @features_hyperv = config.features_hyperv @cpu_mode = config.cpu_mode @cpu_model = config.cpu_model @cpu_fallback = config.cpu_fallback @numa_nodes = config.numa_nodes @loader = config.loader @nvram = config.nvram @machine_type = config.machine_type @machine_arch = config.machine_arch @disk_bus = config.disk_bus @disk_device = config.disk_device @nested = config.nested @memory_size = config.memory.to_i * 1024 @memory_backing = config.memory_backing @management_network_mac = config.management_network_mac @domain_volume_cache = config.volume_cache @kernel = config.kernel @cmd_line = config.cmd_line @emulator_path = config.emulator_path @initrd = config.initrd @dtb = config.dtb @graphics_type = config.graphics_type @graphics_autoport = config.graphics_autoport @graphics_port = config.graphics_port @graphics_ip = config.graphics_ip @graphics_passwd = if config.graphics_passwd.to_s.empty? '' else "passwd='#{config.graphics_passwd}'" end @video_type = config.video_type @sound_type = config.sound_type @video_vram = config.video_vram @keymap = config.keymap @kvm_hidden = config.kvm_hidden @tpm_model = config.tpm_model @tpm_type = config.tpm_type @tpm_path = config.tpm_path # Boot order @boot_order = config.boot_order # Storage @storage_pool_name = config.storage_pool_name @disks = config.disks @cdroms = config.cdroms # Input @inputs = config.inputs # Channels @channels = config.channels # PCI device passthrough @pcis = config.pcis # Watchdog device @watchdog_dev = config.watchdog_dev # USB controller @usbctl_dev = config.usbctl_dev # USB device passthrough @usbs = config.usbs # Redirected devices @redirdevs = config.redirdevs @redirfilters = config.redirfilters # smartcard device @smartcard_dev = config.smartcard_dev # RNG device passthrough @rng = config.rng config = env[:machine].provider_config @domain_type = config.driver @os_type = 'hvm' # Get path to domain image from the storage pool selected if we have a box. if env[:machine].config.vm.box actual_volumes = env[:machine].provider.driver.connection.volumes.all.select do |x| x.pool_name == @storage_pool_name end domain_volume = ProviderLibvirt::Util::Collection.find_matching( actual_volumes, "#{@name}.img" ) raise Errors::DomainVolumeExists if domain_volume.nil? @domain_volume_path = domain_volume.path end # If we have a box, take the path from the domain volume and set our storage_prefix. # If not, we dump the storage pool xml to get its defined path. # the default storage prefix is typically: /var/lib/libvirt/images/ if !config.qemu_use_session if env[:machine].config.vm.box storage_prefix = File.dirname(@domain_volume_path) + '/' # steal else storage_pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name(@storage_pool_name) raise Errors::NoStoragePool if storage_pool.nil? 
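              # The pool's target path is read straight out of its XML description;
              # for the default pool this typically looks roughly like (illustrative):
              #
              #   <pool type='dir'>
              #     <name>default</name>
              #     <target>
              #       <path>/var/lib/libvirt/images</path>
              #     </target>
              #   </pool>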
xml = Nokogiri::XML(storage_pool.xml_desc) storage_prefix = xml.xpath('/pool/target/path').inner_text.to_s + '/' end end @disks.each do |disk| disk[:path] ||= _disk_name(@name, disk) # On volume creation, the element inside # is oddly ignored; instead the path is taken from the # element: # http://www.redhat.com/archives/libvir-list/2008-August/msg00329.html disk[:name] = disk[:path] disk[:absolute_path] = storage_prefix + disk[:path] if env[:machine].provider.driver.connection.volumes.select do |x| x.name == disk[:name] && x.pool_name == @storage_pool_name end.empty? # make the disk. equivalent to: # qemu-img create -f qcow2 5g begin env[:machine].provider.driver.connection.volumes.create( name: disk[:name], format_type: disk[:type], path: disk[:absolute_path], capacity: disk[:size], #:allocation => ?, pool_name: @storage_pool_name ) rescue Fog::Errors::Error => e raise Errors::FogDomainVolumeCreateError, error_message: e.message end else disk[:preexisting] = true end end # Output the settings we're going to use to the user env[:ui].info(I18n.t('vagrant_libvirt.creating_domain')) env[:ui].info(" -- Name: #{@name}") env[:ui].info(" -- Forced UUID: #{@uuid}") if @uuid != '' env[:ui].info(" -- Domain type: #{@domain_type}") env[:ui].info(" -- Cpus: #{@cpus}") if not @cpu_topology.empty? env[:ui].info(" -- CPU topology: sockets=#{@cpu_topology[:sockets]}, cores=#{@cpu_topology[:cores]}, threads=#{@cpu_topology[:threads]}") end @cpu_features.each do |cpu_feature| env[:ui].info(" -- CPU Feature: name=#{cpu_feature[:name]}, policy=#{cpu_feature[:policy]}") end @features.each do |feature| env[:ui].info(" -- Feature: #{feature}") end @features_hyperv.each do |feature| env[:ui].info(" -- Feature (HyperV): name=#{feature[:name]}, state=#{feature[:state]}") end env[:ui].info(" -- Memory: #{@memory_size / 1024}M") @memory_backing.each do |backing| env[:ui].info(" -- Memory Backing: #{backing[:name]}: #{backing[:config].map { |k,v| "#{k}='#{v}'"}.join(' ')}") end env[:ui].info(" -- Management MAC: #{@management_network_mac}") env[:ui].info(" -- Loader: #{@loader}") env[:ui].info(" -- Nvram: #{@nvram}") if env[:machine].config.vm.box env[:ui].info(" -- Base box: #{env[:machine].box.name}") end env[:ui].info(" -- Storage pool: #{@storage_pool_name}") env[:ui].info(" -- Image: #{@domain_volume_path} (#{env[:box_virtual_size]}G)") env[:ui].info(" -- Volume Cache: #{@domain_volume_cache}") env[:ui].info(" -- Kernel: #{@kernel}") env[:ui].info(" -- Initrd: #{@initrd}") env[:ui].info(" -- Graphics Type: #{@graphics_type}") env[:ui].info(" -- Graphics Port: #{@graphics_port}") env[:ui].info(" -- Graphics IP: #{@graphics_ip}") env[:ui].info(" -- Graphics Password: #{@graphics_passwd.empty? ? 'Not defined' : 'Defined'}") env[:ui].info(" -- Video Type: #{@video_type}") env[:ui].info(" -- Video VRAM: #{@video_vram}") env[:ui].info(" -- Sound Type: #{@sound_type}") env[:ui].info(" -- Keymap: #{@keymap}") env[:ui].info(" -- TPM Path: #{@tpm_path}") @boot_order.each do |device| env[:ui].info(" -- Boot device: #{device}") end unless @disks.empty? env[:ui].info(" -- Disks: #{_disks_print(@disks)}") end @disks.each do |disk| msg = " -- Disk(#{disk[:device]}): #{disk[:absolute_path]}" msg += ' Shared' if disk[:shareable] msg += ' (Remove only manually)' if disk[:allow_existing] msg += ' Not created - using existed.' if disk[:preexisting] env[:ui].info(msg) end unless @cdroms.empty? 
env[:ui].info(" -- CDROMS: #{_cdroms_print(@cdroms)}") end @cdroms.each do |cdrom| env[:ui].info(" -- CDROM(#{cdrom[:dev]}): #{cdrom[:path]}") end @inputs.each do |input| env[:ui].info(" -- INPUT: type=#{input[:type]}, bus=#{input[:bus]}") end @channels.each do |channel| env[:ui].info(" -- CHANNEL: type=#{channel[:type]}, mode=#{channel[:source_mode]}") env[:ui].info(" -- CHANNEL: target_type=#{channel[:target_type]}, target_name=#{channel[:target_name]}") end @pcis.each do |pci| env[:ui].info(" -- PCI passthrough: #{pci[:bus]}:#{pci[:slot]}.#{pci[:function]}") end unless @rng[:model].nil? env[:ui].info(" -- RNG device model: #{@rng[:model]}") end if not @watchdog_dev.empty? env[:ui].info(" -- Watchdog device: model=#{@watchdog_dev[:model]}, action=#{@watchdog_dev[:action]}") end if not @usbctl_dev.empty? msg = " -- USB controller: model=#{@usbctl_dev[:model]}" msg += ", ports=#{@usbctl_dev[:ports]}" if @usbctl_dev[:ports] env[:ui].info(msg) end @usbs.each do |usb| usb_dev = [] usb_dev.push("bus=#{usb[:bus]}") if usb[:bus] usb_dev.push("device=#{usb[:device]}") if usb[:device] usb_dev.push("vendor=#{usb[:vendor]}") if usb[:vendor] usb_dev.push("product=#{usb[:product]}") if usb[:product] env[:ui].info(" -- USB passthrough: #{usb_dev.join(', ')}") end unless @redirdevs.empty? env[:ui].info(' -- Redirected Devices: ') @redirdevs.each do |redirdev| msg = " -> bus=usb, type=#{redirdev[:type]}" env[:ui].info(msg) end end unless @redirfilters.empty? env[:ui].info(' -- USB Device filter for Redirected Devices: ') @redirfilters.each do |redirfilter| msg = " -> class=#{redirfilter[:class]}, " msg += "vendor=#{redirfilter[:vendor]}, " msg += "product=#{redirfilter[:product]}, " msg += "version=#{redirfilter[:version]}, " msg += "allow=#{redirfilter[:allow]}" env[:ui].info(msg) end end if not @smartcard_dev.empty? env[:ui].info(" -- smartcard device: mode=#{@smartcard_dev[:mode]}, type=#{@smartcard_dev[:type]}") end @qargs = config.qemu_args if not @qargs.empty? env[:ui].info(' -- Command line args: ') @qargs.each do |arg| msg = " -> value=#{arg[:value]}, " env[:ui].info(msg) end end env[:ui].info(" -- Command line : #{@cmd_line}") unless @cmd_line.empty? # Create libvirt domain. # Is there a way to tell fog to create new domain with already # existing volume? Use domain creation from template.. begin server = env[:machine].provider.driver.connection.servers.create( xml: to_xml('domain') ) rescue Fog::Errors::Error => e raise Errors::FogCreateServerError, error_message: e.message end # Immediately save the ID since it is created at this point. env[:machine].id = server.id @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/start_domain.rb0000644000004100000410000003214613363570025025124 0ustar www-datawww-datarequire 'log4r' require 'rexml/document' module VagrantPlugins module ProviderLibvirt module Action # Just start the domain. class StartDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::start_domain') @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.starting_domain')) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? config = env[:machine].provider_config begin # update domain settings on change. 
libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) # libvirt API doesn't support modifying memory on NUMA enabled CPUs # http://libvirt.org/git/?p=libvirt.git;a=commit;h=d174394105cf00ed266bf729ddf461c21637c736 if config.numa_nodes == nil if config.memory.to_i * 1024 != libvirt_domain.max_memory libvirt_domain.max_memory = config.memory.to_i * 1024 libvirt_domain.memory = libvirt_domain.max_memory end end begin # XML definition manipulation descr = libvirt_domain.xml_desc(1) xml_descr = REXML::Document.new descr descr_changed = false # additional disk bus config.disks.each do |disk| device = disk[:device] bus = disk[:bus] REXML::XPath.each(xml_descr, '/domain/devices/disk[@device="disk"]/target[@dev="' + device + '"]') do |disk_target| next unless disk_target.attributes['bus'] != bus descr_changed = true disk_target.attributes['bus'] = bus disk_target.parent.delete_element("#{disk_target.parent.xpath}/address") end end # disk_bus REXML::XPath.each(xml_descr, '/domain/devices/disk[@device="disk"]/target[@dev="vda"]') do |disk_target| next unless disk_target.attributes['bus'] != config.disk_bus descr_changed = true disk_target.attributes['bus'] = config.disk_bus disk_target.parent.delete_element("#{disk_target.parent.xpath}/address") end # Iterface type unless config.nic_model_type.nil? REXML::XPath.each(xml_descr, '/domain/devices/interface/model') do |iface_model| if iface_model.attributes['type'] != config.nic_model_type descr_changed = true iface_model.attributes['type'] = config.nic_model_type end end end # vCpu count if config.cpus.to_i != libvirt_domain.vcpus.length descr_changed = true REXML::XPath.first(xml_descr, '/domain/vcpu').text = config.cpus end # cpu_mode cpu = REXML::XPath.first(xml_descr, '/domain/cpu') if cpu.nil? descr_changed = true cpu = REXML::Element.new('cpu', REXML::XPath.first(xml_descr, '/domain')) cpu.attributes['mode'] = config.cpu_mode else if cpu.attributes['mode'] != config.cpu_mode descr_changed = true cpu.attributes['mode'] = config.cpu_mode end end if config.cpu_mode != 'host-passthrough' cpu_model = REXML::XPath.first(xml_descr, '/domain/cpu/model') if cpu_model.nil? descr_changed = true cpu_model = REXML::Element.new('model', REXML::XPath.first(xml_descr, '/domain/cpu')) cpu_model.attributes['fallback'] = 'allow' cpu_model.text = config.cpu_model else if cpu_model.text != config.cpu_model descr_changed = true cpu_model.text = config.cpu_model end if cpu_model.attributes['fallback'] != config.cpu_fallback descr_changed = true cpu_model.attributes['fallback'] = config.cpu_fallback end end vmx_feature = REXML::XPath.first(xml_descr, '/domain/cpu/feature[@name="vmx"]') svm_feature = REXML::XPath.first(xml_descr, '/domain/cpu/feature[@name="svm"]') if config.nested if vmx_feature.nil? descr_changed = true vmx_feature = REXML::Element.new('feature', REXML::XPath.first(xml_descr, '/domain/cpu')) vmx_feature.attributes['policy'] = 'optional' vmx_feature.attributes['name'] = 'vmx' end if svm_feature.nil? descr_changed = true svm_feature = REXML::Element.new('feature', REXML::XPath.first(xml_descr, '/domain/cpu')) svm_feature.attributes['policy'] = 'optional' svm_feature.attributes['name'] = 'svm' end else unless vmx_feature.nil? descr_changed = true cpu.delete_element(vmx_feature) end unless svm_feature.nil? descr_changed = true cpu.delete_element(svm_feature) end end elsif config.numa_nodes == nil unless cpu.elements.to_a.empty? 
descr_changed = true cpu.elements.each do |elem| cpu.delete_element(elem) end end end # Graphics graphics = REXML::XPath.first(xml_descr, '/domain/devices/graphics') if config.graphics_type != 'none' if graphics.nil? descr_changed = true graphics = REXML::Element.new('graphics', REXML::XPath.first(xml_descr, '/domain/devices')) end if graphics.attributes['type'] != config.graphics_type descr_changed = true graphics.attributes['type'] = config.graphics_type end if graphics.attributes['listen'] != config.graphics_ip descr_changed = true graphics.attributes['listen'] = config.graphics_ip graphics.delete_element('//listen') end if graphics.attributes['autoport'] != config.graphics_autoport descr_changed = true graphics.attributes['autoport'] = config.graphics_autoport if config.graphics_autoport == 'no' graphics.attributes['port'] = config.graphics_port end end if graphics.attributes['keymap'] != config.keymap descr_changed = true graphics.attributes['keymap'] = config.keymap end if graphics.attributes['passwd'] != config.graphics_passwd descr_changed = true if config.graphics_passwd.nil? graphics.attributes.delete 'passwd' else graphics.attributes['passwd'] = config.graphics_passwd end end else # graphics_type = none, remove entire element graphics.parent.delete_element(graphics) unless graphics.nil? end # TPM if config.tpm_path raise Errors::FogCreateServerError, 'The TPM Path must be fully qualified' unless config.tpm_path[0].chr == '/' tpm = REXML::XPath.first(xml_descr, '/domain/devices/tpm') if tpm.nil? descr_changed = true tpm = REXML::Element.new('tpm', REXML::XPath.first(xml_descr, '/domain/devices/tpm/model')) tpm.attributes['model'] = config.tpm_model tpm_backend_type = tpm.add_element('backend') tpm_backend_type.attributes['type'] = config.tpm_type tpm_device_path = tpm_backend_type.add_element('device') tpm_device_path.attributes['path'] = config.tpm_path else if tpm.attributes['model'] != config.tpm_model descr_changed = true tpm.attributes['model'] = config.tpm_model end if tpm.elements['backend'].attributes['type'] != config.tpm_type descr_changed = true tpm.elements['backend'].attributes['type'] = config.tpm_type end if tpm.elements['backend'].elements['device'].attributes['path'] != config.tpm_path descr_changed = true tpm.elements['backend'].elements['device'].attributes['path'] = config.tpm_path end end end # Video device video = REXML::XPath.first(xml_descr, '/domain/devices/video') if !video.nil? && (config.graphics_type == 'none') # graphics_type = none, video devices are removed since there is no possible output descr_changed = true video.parent.delete_element(video) else video_model = REXML::XPath.first(xml_descr, '/domain/devices/video/model') if video_model.nil? video_model = REXML::Element.new('model', REXML::XPath.first(xml_descr, '/domain/devices/video')) video_model.attributes['type'] = config.video_type video_model.attributes['vram'] = config.video_vram else if video_model.attributes['type'] != config.video_type || video_model.attributes['vram'] != config.video_vram descr_changed = true video_model.attributes['type'] = config.video_type video_model.attributes['vram'] = config.video_vram end end end # Sound device if config.sound_type sound = REXML::XPath.first(xml_descr,'/domain/devices/sound/model') end # dtb if config.dtb dtb = REXML::XPath.first(xml_descr, '/domain/os/dtb') if dtb.nil? 
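                # No <dtb> element yet: <os>/<dtb> supplies a device-tree blob path
                # (commonly used with ARM machine types), so one is created here;
                # otherwise the existing element's text is updated in place.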
descr_changed = true dtb = REXML::Element.new('dtb', REXML::XPath.first(xml_descr, '/domain/os')) dtb.text = config.dtb else if dtb.text != config.dtb descr_changed = true dtb.text = config.dtb end end end # kernel and initrd if config.kernel kernel = REXML::XPath.first(xml_descr, '/domain/os/kernel') if kernel.nil? descr_changed = true kernel = REXML::Element.new('kernel', REXML::XPath.first(xml_descr, '/domain/os')) kernel.text = config.kernel else if kernel.text != config.kernel descr_changed = true kernel.text = config.kernel end end end if config.initrd initrd = REXML::XPath.first(xml_descr, '/domain/os/initrd') if initrd.nil? descr_changed = true initrd = REXML::Element.new('initrd', REXML::XPath.first(xml_descr, '/domain/os')) initrd.text = config.initrd else if initrd.text != config.initrd descr_changed = true initrd.text = config.initrd end end end # Apply if descr_changed begin libvirt_domain.undefine new_descr = '' xml_descr.write new_descr server = env[:machine].provider.driver.connection.servers.create(xml: new_descr) rescue Fog::Errors::Error => e server = env[:machine].provider.driver.connection.servers.create(xml: descr) raise Errors::FogCreateServerError, error_message: e.message end end rescue => e env[:ui].error("Error when updating domain settings: #{e.message}") end # Autostart with host if enabled in Vagrantfile libvirt_domain.autostart = config.autostart # Actually start the domain domain.start rescue => e raise Errors::FogError, message: e.message end @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/forward_ports.rb0000644000004100000410000001533113363570025025330 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action # Adds support for vagrant's `forward_ports` configuration directive. class ForwardPorts @@lock = Mutex.new def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant_libvirt::action::forward_ports') end def call(env) @env = env # Get the ports we're forwarding env[:forwarded_ports] = compile_forwarded_ports(env[:machine].config) # Warn if we're port forwarding to any privileged ports env[:forwarded_ports].each do |fp| next unless fp[:host] <= 1024 env[:ui].warn I18n.t( 'vagrant.actions.vm.forward_ports.privileged_ports' ) break end # Continue, we need the VM to be booted in order to grab its IP @app.call env if @env[:forwarded_ports].any? env[:ui].info I18n.t('vagrant.actions.vm.forward_ports.forwarding') forward_ports end end def forward_ports @env[:forwarded_ports].each do |fp| message_attributes = { adapter: fp[:adapter] || 'eth0', guest_port: fp[:guest], host_port: fp[:host] } @env[:ui].info(I18n.t( 'vagrant.actions.vm.forward_ports.forwarding_entry', message_attributes )) if fp[:protocol] == 'udp' @env[:ui].warn I18n.t('vagrant_libvirt.warnings.forwarding_udp') next end ssh_pid = redirect_port( @env[:machine], fp[:host_ip] || 'localhost', fp[:host], fp[:guest_ip] || @env[:machine].provider.ssh_info[:host], fp[:guest], fp[:gateway_ports] || false ) store_ssh_pid(fp[:host], ssh_pid) end end private def compile_forwarded_ports(config) mappings = {} config.vm.networks.each do |type, options| next if options[:disabled] next unless type == :forwarded_port && options[:id] != 'ssh' if options.fetch(:host_ip, '').to_s.strip.empty? 
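            # A blank host_ip is treated as unset so the ssh tunnel created in
            # redirect_port falls back to 'localhost'. Illustrative Vagrantfile
            # entry handled here (ports made up):
            #   config.vm.network :forwarded_port, guest: 80, host: 8080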
options.delete(:host_ip) end mappings[options[:host]] = options end mappings.values end def redirect_port(machine, host_ip, host_port, guest_ip, guest_port, gateway_ports) ssh_info = machine.ssh_info params = %W( -L #{host_ip}:#{host_port}:#{guest_ip}:#{guest_port} -N #{ssh_info[:host]} ).join(' ') params += ' -g' if gateway_ports options = (%W( User=#{ssh_info[:username]} Port=#{ssh_info[:port]} UserKnownHostsFile=/dev/null StrictHostKeyChecking=no PasswordAuthentication=no ForwardX11=#{ssh_info[:forward_x11] ? 'yes' : 'no'} IdentitiesOnly=#{ssh_info[:keys_only] ? 'yes' : 'no'} ) + ssh_info[:private_key_path].map do |pk| "IdentityFile='\"#{pk}\"'" end).map { |s| s.prepend('-o ') }.join(' ') options += " -o ProxyCommand=\"#{ssh_info[:proxy_command]}\"" if machine.provider_config.connect_via_ssh # TODO: instead of this, try and lock and get the stdin from spawn... ssh_cmd = 'exec ' if host_port <= 1024 @@lock.synchronize do # TODO: add i18n @env[:ui].info 'Requesting sudo for host port(s) <= 1024' r = system('sudo -v') if r ssh_cmd << 'sudo ' # add sudo prefix end end end ssh_cmd << "ssh #{options} #{params}" @logger.debug "Forwarding port with `#{ssh_cmd}`" log_file = ssh_forward_log_file(host_ip, host_port, guest_ip, guest_port) @logger.info "Logging to #{log_file}" spawn(ssh_cmd, [:out, :err] => [log_file, 'w']) end def ssh_forward_log_file(host_ip, host_port, guest_ip, guest_port) log_dir = @env[:machine].data_dir.join('logs') log_dir.mkdir unless log_dir.directory? File.join( log_dir, 'ssh-forwarding-%s_%s-%s_%s.log' % [host_ip, host_port, guest_ip, guest_port] ) end def store_ssh_pid(host_port, ssh_pid) data_dir = @env[:machine].data_dir.join('pids') data_dir.mkdir unless data_dir.directory? data_dir.join("ssh_#{host_port}.pid").open('w') do |pid_file| pid_file.write(ssh_pid) end end end end end end module VagrantPlugins module ProviderLibvirt module Action # Cleans up ssh-forwarded ports on VM halt/destroy. class ClearForwardedPorts @@lock = Mutex.new def initialize(app, _env) @app = app @logger = Log4r::Logger.new( 'vagrant_libvirt::action::clear_forward_ports' ) end def call(env) @env = env if ssh_pids.any? 
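          # ssh_pids reads the pid files written by ForwardPorts#store_ssh_pid
          # (one file per forwarded host port, named pids/ssh_<host_port>.pid under
          # the machine's data dir); each recorded ssh process is killed below.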
env[:ui].info I18n.t( 'vagrant.actions.vm.clear_forward_ports.deleting' ) ssh_pids.each do |tag| next unless ssh_pid?(tag[:pid]) @logger.debug "Killing pid #{tag[:pid]}" kill_cmd = '' if tag[:port] <= 1024 kill_cmd << 'sudo ' # add sudo prefix end kill_cmd << "kill #{tag[:pid]}" @@lock.synchronize do system(kill_cmd) end end @logger.info 'Removing ssh pid files' remove_ssh_pids else @logger.info 'No ssh pids found' end @app.call env end protected def ssh_pids glob = @env[:machine].data_dir.join('pids').to_s + '/ssh_*.pid' @ssh_pids = Dir[glob].map do |file| { pid: File.read(file).strip.chomp, port: File.basename(file)['ssh_'.length..-1 * ('.pid'.length + 1)].to_i } end end def ssh_pid?(pid) @logger.debug 'Checking if #{pid} is an ssh process '\ 'with `ps -o cmd= #{pid}`' `ps -o cmd= #{pid}`.strip.chomp =~ /ssh/ end def remove_ssh_pids glob = @env[:machine].data_dir.join('pids').to_s + '/ssh_*.pid' Dir[glob].each do |file| File.delete file end end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/destroy_domain.rb0000644000004100000410000000614413363570025025457 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action class DestroyDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::destroy_domain') @app = app end def call(env) # Destroy the server, remove the tracking ID env[:ui].info(I18n.t('vagrant_libvirt.destroy_domain')) # Must delete any snapshots before domain can be destroyed # Fog libvirt currently doesn't support snapshots. Use # ruby-libvirt client directly. Note this is racy, see # http://www.libvirt.org/html/libvirt-libvirt.html#virDomainSnapshotListNames libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid( env[:machine].id ) begin libvirt_domain.list_snapshots.each do |name| @logger.info("Deleting snapshot '#{name}'") begin libvirt_domain.lookup_snapshot_by_name(name).delete rescue => e raise Errors::DeleteSnapshotError, error_message: e.message end end rescue # Some drivers (xen) don't support getting list of snapshots, # not much can be done here about it @logger.warn("Failed to get list of snapshots") end # must remove managed saves libvirt_domain.managed_save_remove if libvirt_domain.has_managed_save? domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) if env[:machine].provider_config.disks.empty? && env[:machine].provider_config.cdroms.empty? # if using default configuration of disks and cdroms # cdroms are consider volumes, but cannot be destroyed domain.destroy(destroy_volumes: true) else domain.destroy(destroy_volumes: false) env[:machine].provider_config.disks.each do |disk| # shared disks remove only manually or ??? next if disk[:allow_existing] diskname = libvirt_domain.name + '-' + disk[:device] + '.' 
+ disk[:type].to_s # diskname is unique libvirt_disk = domain.volumes.select do |x| x.name == diskname end.first if libvirt_disk libvirt_disk.destroy elsif disk[:path] poolname = env[:machine].provider_config.storage_pool_name libvirt_disk = domain.volumes.select do |x| # FIXME: can remove pool/target.img and pool/123/target.img x.path =~ /\/#{disk[:path]}$/ && x.pool_name == poolname end.first libvirt_disk.destroy if libvirt_disk end end # remove root storage root_disk = domain.volumes.select do |x| x.name == libvirt_domain.name + '.img' end.first root_disk.destroy if root_disk end @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/is_suspended.rb0000644000004100000410000000267013363570025025124 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action # This can be used with "Call" built-in to check if the machine # is suspended and branch in the middleware. class IsSuspended def initialize(app, _env) @app = app end def call(env) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? config = env[:machine].provider_config libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) if config.suspend_mode == 'managedsave' if libvirt_domain.has_managed_save? env[:result] = libvirt_domain.has_managed_save? else env[:result] = domain.state.to_s == 'paused' if env[:result] env[:ui].warn('One time switching to pause suspend mode, found a paused VM.') config.suspend_mode = 'pause' end end else if libvirt_domain.has_managed_save? env[:ui].warn('One time switching to managedsave suspend mode, state found.') env[:result] = true config.suspend_mode = 'managedsave' else env[:result] = domain.state.to_s == 'paused' end end @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/handle_box_image.rb0000644000004100000410000001651613363570025025710 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action class HandleBoxImage include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::StorageUtil @@lock = Mutex.new def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::handle_box_image') @app = app end def call(env) # Verify box metadata for mandatory values. # # Virtual size has to be set for allocating space in storage pool. box_virtual_size = env[:machine].box.metadata['virtual_size'] raise Errors::NoBoxVirtualSizeSet if box_virtual_size.nil? # Support qcow2 format only for now, but other formats with backing # store capability should be usable. box_format = env[:machine].box.metadata['format'] if box_format.nil? 
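          # Both virtual_size (checked above) and format are read from the box
          # metadata (the metadata.json shipped inside the box); an illustrative
          # example of the expected keys (numbers made up):
          #   { "format": "qcow2", "virtual_size": 40 }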
raise Errors::NoBoxFormatSet elsif box_format != 'qcow2' raise Errors::WrongBoxFormatSet end # Get config options config = env[:machine].provider_config box_image_file = env[:machine].box.directory.join('box.img').to_s env[:box_volume_name] = env[:machine].box.name.to_s.dup.gsub('/', '-VAGRANTSLASH-') env[:box_volume_name] << "_vagrant_box_image_#{ begin env[:machine].box.version.to_s rescue '' end}.img" # Override box_virtual_size if config.machine_virtual_size if config.machine_virtual_size < box_virtual_size # Warn that a virtual size less than the box metadata size # is not supported and will be ignored env[:ui].warn I18n.t( 'vagrant_libvirt.warnings.ignoring_virtual_size_too_small', requested: config.machine_virtual_size, minimum: box_virtual_size ) else env[:ui].info I18n.t('vagrant_libvirt.manual_resize_required') box_virtual_size = config.machine_virtual_size end end # save for use by later actions env[:box_virtual_size] = box_virtual_size # while inside the synchronize block take care not to call the next # action in the chain, as must exit this block first to prevent # locking all subsequent actions as well. @@lock.synchronize do # Don't continue if image already exists in storage pool. break if ProviderLibvirt::Util::Collection.find_matching( env[:machine].provider.driver.connection.volumes.all, env[:box_volume_name] ) # Box is not available as a storage pool volume. Create and upload # it as a copy of local box image. env[:ui].info(I18n.t('vagrant_libvirt.uploading_volume')) # Create new volume in storage pool unless File.exist?(box_image_file) raise Vagrant::Errors::BoxNotFound, name: env[:machine].box.name end box_image_size = File.size(box_image_file) # B message = "Creating volume #{env[:box_volume_name]}" message << " in storage pool #{config.storage_pool_name}." @logger.info(message) if config.qemu_use_session begin @name = env[:box_volume_name] @allocation = "#{box_image_size / 1024 / 1024}M" @capacity = "#{box_virtual_size}G" @format_type = box_format ? box_format : 'raw' @storage_volume_uid = storage_uid env @storage_volume_gid = storage_gid env libvirt_client = env[:machine].provider.driver.connection.client libvirt_pool = libvirt_client.lookup_storage_pool_by_name( config.storage_pool_name ) libvirt_volume = libvirt_pool.create_volume_xml( to_xml('default_storage_volume') ) rescue => e raise Errors::CreatingVolumeError, error_message: e.message end else begin fog_volume = env[:machine].provider.driver.connection.volumes.create( name: env[:box_volume_name], allocation: "#{box_image_size / 1024 / 1024}M", capacity: "#{box_virtual_size}G", format_type: box_format, pool_name: config.storage_pool_name ) rescue Fog::Errors::Error => e raise Errors::FogCreateVolumeError, error_message: e.message end end # Upload box image to storage pool ret = upload_image(box_image_file, config.storage_pool_name, env[:box_volume_name], env) do |progress| env[:ui].clear_line env[:ui].report_progress(progress, box_image_size, false) end # Clear the line one last time since the progress meter doesn't # disappear immediately. env[:ui].clear_line # If upload failed or was interrupted, remove created volume from # storage pool. if env[:interrupted] || !ret begin if config.qemu_use_session libvirt_volume.delete else fog_volume.destroy end rescue nil end end end @app.call(env) end def split_size_unit(text) if text.kind_of? 
Integer # if text is an integer, match will fail size = text unit = 'G' else matcher = text.match(/(\d+)(.+)/) size = matcher[1] unit = matcher[2] end [size, unit] end protected # Fog libvirt currently doesn't support uploading images to storage # pool volumes. Use ruby-libvirt client instead. def upload_image(image_file, pool_name, volume_name, env) image_size = File.size(image_file) # B begin pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name( pool_name ) volume = pool.lookup_volume_by_name(volume_name) stream = env[:machine].provider.driver.connection.client.stream volume.upload(stream, offset = 0, length = image_size) # Exception ProviderLibvirt::RetrieveError can be raised if buffer is # longer than length accepted by API send function. # # TODO: How to find out if buffer is too large and what is the # length that send function will accept? buf_size = 1024 * 250 # 250K progress = 0 open(image_file, 'rb') do |io| while (buff = io.read(buf_size)) sent = stream.send buff progress += sent yield progress end end rescue => e raise Errors::ImageUploadError, error_message: e.message end progress == image_size end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/prepare_nfs_valid_ids.rb0000644000004100000410000000063413363570025026757 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action class PrepareNFSValidIds def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant::action::vm::nfs') end def call(env) env[:nfs_valid_ids] = env[:machine].provider.driver.connection.servers.all.map(&:id) @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/halt_domain.rb0000644000004100000410000000176513363570025024722 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Halt the domain. class HaltDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::halt_domain') @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.halt_domain')) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? begin env[:machine].guest.capability(:halt) rescue @logger.info('Trying libvirt graceful shutdown.') domain.shutdown end begin domain.wait_for(30) do !ready? end rescue Fog::Errors::TimeoutError @logger.info('VM is still running. Calling force poweroff.') domain.poweroff end @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/remove_libvirt_image.rb0000644000004100000410000000112113363570025026617 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action class RemoveLibvirtImage def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::remove_libvirt_image') @app = app end def call(env) env[:ui].info('Vagrant-libvirt plugin removed box only from you LOCAL ~/.vagrant/boxes directory') env[:ui].info('From libvirt storage pool you have to delete image manually(virsh, virt-manager or by any other tool)') @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/create_network_interfaces.rb0000644000004100000410000003116613363570025027660 0ustar www-datawww-datarequire 'log4r' require 'vagrant/util/network_ip' require 'vagrant/util/scoped_hash_override' module VagrantPlugins module ProviderLibvirt module Action # Create network interfaces for domain, before domain is running. 
# Networks for connecting those interfaces should be already prepared. class CreateNetworkInterfaces include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::NetworkUtil include Vagrant::Util::NetworkIP include Vagrant::Util::ScopedHashOverride def initialize(app, env) @logger = Log4r::Logger.new('vagrant_libvirt::action::create_network_interfaces') @management_network_name = env[:machine].provider_config.management_network_name config = env[:machine].provider_config @nic_model_type = config.nic_model_type || 'virtio' @nic_adapter_count = config.nic_adapter_count @app = app end def call(env) # Get domain first. begin domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid( env[:machine].id.to_s ) rescue => e raise Errors::NoDomainError, error_message: e.message end # Setup list of interfaces before creating them. adapters = [] # Vagrant gives you adapter 0 by default # Assign interfaces to slots. configured_networks(env, @logger).each do |options| # dont need to create interface for this type next if options[:iface_type] == :forwarded_port # TODO: fill first ifaces with adapter option specified. if options[:adapter] if adapters[options[:adapter]] raise Errors::InterfaceSlotNotAvailable end free_slot = options[:adapter].to_i @logger.debug "Using specified adapter slot #{free_slot}" else free_slot = find_empty(adapters) @logger.debug "Adapter not specified so found slot #{free_slot}" raise Errors::InterfaceSlotExhausted if free_slot.nil? end # We have slot for interface, fill it with interface configuration. adapters[free_slot] = options adapters[free_slot][:network_name] = interface_network( env[:machine].provider.driver.connection.client, adapters[free_slot] ) end # Create each interface as new domain device. 
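        # Each adapter slot resolved above typically originates from a Vagrantfile
        # network line; an illustrative private network (address made up):
        #   config.vm.network :private_network, ip: "192.168.50.4"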
@macs_per_network = Hash.new(0) adapters.each_with_index do |iface_configuration, slot_number| @iface_number = slot_number @network_name = iface_configuration[:network_name] @source_options = { network: @network_name } @mac = iface_configuration.fetch(:mac, false) @model_type = iface_configuration.fetch(:model_type, @nic_model_type) @driver_name = iface_configuration.fetch(:driver_name, false) @driver_queues = iface_configuration.fetch(:driver_queues, false) @device_name = iface_configuration.fetch(:iface_name, false) @mtu = iface_configuration.fetch(:mtu, nil) @pci_bus = iface_configuration.fetch(:bus, nil) @pci_slot = iface_configuration.fetch(:slot, nil) template_name = 'interface' @type = nil @udp_tunnel = nil # Configuration for public interfaces which use the macvtap driver if iface_configuration[:iface_type] == :public_network @device = iface_configuration.fetch(:dev, 'eth0') @mode = iface_configuration.fetch(:mode, 'bridge') @type = iface_configuration.fetch(:type, 'direct') @model_type = iface_configuration.fetch(:model_type, @nic_model_type) @driver_name = iface_configuration.fetch(:driver_name, false) @driver_queues = iface_configuration.fetch(:driver_queues, false) @portgroup = iface_configuration.fetch(:portgroup, nil) @network_name = iface_configuration.fetch(:network_name, @network_name) template_name = 'public_interface' @logger.info("Setting up public interface using device #{@device} in mode #{@mode}") @ovs = iface_configuration.fetch(:ovs, false) @trust_guest_rx_filters = iface_configuration.fetch(:trust_guest_rx_filters, false) # configuration for udp or tcp tunnel interfaces (p2p conn btwn guest OSes) elsif iface_configuration.fetch(:tunnel_type, nil) @type = iface_configuration.fetch(:tunnel_type) @tunnel_port = iface_configuration.fetch(:tunnel_port, nil) raise Errors::TunnelPortNotDefined if @tunnel_port.nil? if @type == 'udp' # default udp tunnel source to 127.0.0.1 @udp_tunnel={ address: iface_configuration.fetch(:tunnel_local_ip,'127.0.0.1'), port: iface_configuration.fetch(:tunnel_local_port) } end # default mcast tunnel to 239.255.1.1. Web search says this # 239.255.x.x is a safe range to use for general use mcast default_ip = if @type == 'mcast' '239.255.1.1' else '127.0.0.1' end @source_options = { address: iface_configuration.fetch(:tunnel_ip, default_ip), port: @tunnel_port } @tunnel_type = iface_configuration.fetch(:model_type, @nic_model_type) @driver_name = iface_configuration.fetch(:driver_name, false) @driver_queues = iface_configuration.fetch(:driver_queues, false) template_name = 'tunnel_interface' @logger.info("Setting up #{@type} tunnel interface using #{@tunnel_ip} port #{@tunnel_port}") end message = "Creating network interface eth#{@iface_number}" message << " connected to network #{@network_name}." 
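          # A user-supplied MAC may be given with or without separators; the
          # normalisation below extracts hex pairs and joins them with ':', so
          # (illustrative values) '52540000abcd' and '52:54:00:00:ab:cd' both
          # end up as '52:54:00:00:ab:cd'.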
if @mac @mac = @mac.scan(/(\h{2})/).join(':') message << " Using MAC address: #{@mac}" end @logger.info(message) begin # FIXME: all options for network driver should be hash from Vagrantfile driver_options = {} driver_options[:name] = @driver_name if @driver_name driver_options[:queues] = @driver_queues if @driver_queues @udp_tunnel ||= {} xml = if template_name == 'interface' or template_name == 'tunnel_interface' interface_xml(@type, @source_options, @mac, @device_name, @iface_number, @model_type, @mtu, driver_options, @udp_tunnel, @pci_bus, @pci_slot) else to_xml(template_name) end domain.attach_device(xml) rescue => e raise Errors::AttachDeviceError, error_message: e.message end # Re-read the network configuration and grab the MAC address if iface_configuration[:iface_type] == :public_network xml = Nokogiri::XML(domain.xml_desc) source = "@network='#{@network_name}'" if @type == 'direct' source = "@dev='#{@device}'" elsif @portgroup.nil? source = "@bridge='#{@device}'" end if not @mac macs = xml.xpath("/domain/devices/interface[source[#{source}]]/mac/@address") @mac = macs[@macs_per_network[source]] iface_configuration[:mac] = @mac.to_s end @macs_per_network[source] += 1 end end # Continue the middleware chain. @app.call(env) if env[:machine].config.vm.box # Configure interfaces that user requested. Machine should be up and # running now. networks_to_configure = [] adapters.each_with_index do |options, slot_number| # Skip configuring the management network, which is on the first interface. # It's used for provisioning and it has to be available during provisioning, # ifdown command is not acceptable here. next if slot_number.zero? next if options[:auto_config] === false @logger.debug "Configuring interface slot_number #{slot_number} options #{options}" network = { interface: slot_number, use_dhcp_assigned_default_route: options[:use_dhcp_assigned_default_route], mac_address: options[:mac] } if options[:ip] network = { type: :static, ip: options[:ip], netmask: options[:netmask], gateway: options[:gateway] }.merge(network) else network[:type] = :dhcp end # do not run configure_networks for tcp tunnel interfaces next if options.fetch(:tunnel_type, nil) networks_to_configure << network end env[:ui].info I18n.t('vagrant.actions.vm.network.configuring') env[:machine].guest.capability( :configure_networks, networks_to_configure ) end end private def target_dev_name(device_name, type, iface_number) if device_name device_name elsif type == 'network' "vnet#{iface_number}" else # TODO can we use same name vnet#ifnum? 
#"tnet#{iface_number}" FIXME plugin vagrant-libvirt trying to create second tnet0 interface "vnet#{iface_number}" end end def interface_xml(type, source_options, mac, device_name, iface_number, model_type, mtu, driver_options, udp_tunnel={}, pci_bus, pci_slot) Nokogiri::XML::Builder.new do |xml| xml.interface(type: type || 'network') do xml.source(source_options) do xml.local(udp_tunnel) if type == 'udp' end xml.mac(address: mac) if mac xml.target(dev: target_dev_name(device_name, type, iface_number)) xml.alias(name: "net#{iface_number}") xml.model(type: model_type.to_s) xml.mtu(size: Integer(mtu)) if mtu xml.driver(driver_options) xml.address(type: 'pci', bus: pci_bus, slot: pci_slot) if pci_bus and pci_slot end end.to_xml( save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION | Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS | Nokogiri::XML::Node::SaveOptions::FORMAT ) end def find_empty(array, start = 0, stop = @nic_adapter_count) (start..stop).each do |i| return i unless array[i] end nil end # Return network name according to interface options. def interface_network(libvirt_client, options) # no need to get interface network for tcp tunnel config return 'tunnel_interface' if options.fetch(:tunnel_type, nil) if options[:network_name] @logger.debug 'Found network by name' return options[:network_name] end # Get list of all (active and inactive) libvirt networks. available_networks = libvirt_networks(libvirt_client) return 'public' if options[:iface_type] == :public_network if options[:ip] address = network_address(options[:ip], options[:netmask]) available_networks.each do |network| if address == network[:network_address] @logger.debug 'Found network by ip' return network[:name] end end end raise Errors::NetworkNotAvailableError, network_name: options[:ip] end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/wait_till_up.rb0000644000004100000410000001103613363570025025127 0ustar www-datawww-datarequire 'log4r' require 'vagrant-libvirt/errors' require 'vagrant-libvirt/util/timer' require 'vagrant/util/retryable' module VagrantPlugins module ProviderLibvirt module Action # Wait till domain is started, till it obtains an IP address and is # accessible via ssh. class WaitTillUp include Vagrant::Util::Retryable def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::wait_till_up') @app = app end def call(env) # Initialize metrics if they haven't been env[:metrics] ||= {} # Get domain object domain = env[:machine].provider.driver.get_domain(env[:machine].id.to_s) if domain.nil? raise Errors::NoDomainError, error_message: "Domain #{env[:machine].id} not found" end # Wait for domain to obtain an ip address. Ip address is searched # from arp table, either localy or remotely via ssh, if libvirt # connection was done via ssh. env[:ip_address] = nil @logger.debug("Searching for IP for MAC address: #{domain.mac}") env[:ui].info(I18n.t('vagrant_libvirt.waiting_for_ip')) if env[:machine].provider_config.qemu_use_session env[:metrics]['instance_ip_time'] = Util::Timer.time do retryable(on: Fog::Errors::TimeoutError, tries: 300) do # If we're interrupted don't worry about waiting return terminate(env) if env[:interrupted] # Wait for domain to obtain an ip address domain.wait_for(2) do env[:ip_address] = env[:machine].provider.driver.get_ipaddress_system(domain.mac) !env[:ip_address].nil? 
end end end else env[:metrics]['instance_ip_time'] = Util::Timer.time do retryable(on: Fog::Errors::TimeoutError, tries: 300) do # If we're interrupted don't worry about waiting return terminate(env) if env[:interrupted] # Wait for domain to obtain an ip address domain.wait_for(2) do addresses.each_pair do |_type, ip| env[:ip_address] = ip[0] unless ip[0].nil? end !env[:ip_address].nil? end end end end @logger.info("Got IP address #{env[:ip_address]}") @logger.info("Time for getting IP: #{env[:metrics]['instance_ip_time']}") # Machine has ip address assigned, now wait till we are able to # connect via ssh. env[:metrics]['instance_ssh_time'] = Util::Timer.time do env[:ui].info(I18n.t('vagrant_libvirt.waiting_for_ssh')) retryable(on: Fog::Errors::TimeoutError, tries: 60) do # If we're interrupted don't worry about waiting next if env[:interrupted] # Wait till we are able to connect via ssh. loop do # If we're interrupted then just back out break if env[:interrupted] break if env[:machine].communicate.ready? sleep 2 end end end # if interrupted above, just terminate immediately return terminate(env) if env[:interrupted] @logger.info("Time for SSH ready: #{env[:metrics]['instance_ssh_time']}") # Booted and ready for use. # env[:ui].info(I18n.t("vagrant_libvirt.ready")) @app.call(env) end def recover(env) return if env['vagrant.error'].is_a?(Vagrant::Errors::VagrantError) # Undo the import terminate(env) end def terminate(env) if env[:machine].provider.state.id != :not_created # If we're not supposed to destroy on error then just return return unless env[:destroy_on_error] if env[:halt_on_error] halt_env = env.dup halt_env.delete(:interrupted) halt_env[:config_validate] = false env[:action_runner].run(Action.action_halt, halt_env) else destroy_env = env.dup destroy_env.delete(:interrupted) destroy_env[:config_validate] = false destroy_env[:force_confirm_destroy] = true env[:action_runner].run(Action.action_destroy, destroy_env) end end end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/create_domain_volume.rb0000644000004100000410000000620313363570025026614 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Create a snapshot of base box image. This new snapshot is just new # cow image with backing storage pointing to base box image. Use this # image as new domain volume. class CreateDomainVolume include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::StorageUtil def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::create_domain_volume') @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.creating_domain_volume')) # Get config options. config = env[:machine].provider_config # This is name of newly created image for vm. @name = "#{env[:domain_name]}.img" # Verify the volume doesn't exist already. domain_volume = ProviderLibvirt::Util::Collection.find_matching( env[:machine].provider.driver.connection.volumes.all, @name ) raise Errors::DomainVolumeExists if domain_volume # Get path to backing image - box volume. box_volume = ProviderLibvirt::Util::Collection.find_matching( env[:machine].provider.driver.connection.volumes.all, env[:box_volume_name] ) @backing_file = box_volume.path # Virtual size of image. Take value worked out by HandleBoxImage @capacity = env[:box_virtual_size] # G # Create new volume from xml template. Fog currently doesn't support # volume snapshots directly. 
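          # The builder below produces a qcow2 volume whose <backingStore> points
          # at the box image, roughly as follows (name, size and path illustrative;
          # <permissions> elements omitted for brevity):
          #
          #   <volume>
          #     <name>project_default.img</name>
          #     <capacity unit="G">40</capacity>
          #     <target><format type="qcow2"/>...</target>
          #     <backingStore>
          #       <path>/var/lib/libvirt/images/box_vagrant_box_image.img</path>
          #       <format type="qcow2"/>
          #     </backingStore>
          #   </volume>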
begin xml = Nokogiri::XML::Builder.new do |xml| xml.volume do xml.name(@name) xml.capacity(@capacity, unit: 'G') xml.target do xml.format(type: 'qcow2') xml.permissions do xml.owner storage_uid(env) xml.group storage_gid(env) xml.mode '0600' xml.label 'virt_image_t' end end xml.backingStore do xml.path(@backing_file) xml.format(type: 'qcow2') xml.permissions do xml.owner storage_uid(env) xml.group storage_gid(env) xml.mode '0600' xml.label 'virt_image_t' end end end end.to_xml( save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION | Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS | Nokogiri::XML::Node::SaveOptions::FORMAT ) domain_volume = env[:machine].provider.driver.connection.volumes.create( xml: xml, pool_name: config.storage_pool_name ) rescue Fog::Errors::Error => e raise Errors::FogDomainVolumeCreateError, error_message: e.message end @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/is_created.rb0000644000004100000410000000065113363570025024536 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action # This can be used with "Call" built-in to check if the machine # is created and branch in the middleware. class IsCreated def initialize(app, _env) @app = app end def call(env) env[:result] = env[:machine].state.id != :not_created @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/message_not_created.rb0000644000004100000410000000047113363570025026427 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action class MessageNotCreated def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.not_created')) @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/destroy_networks.rb0000644000004100000410000000701113363570025026056 0ustar www-datawww-datarequire 'log4r' require 'nokogiri' module VagrantPlugins module ProviderLibvirt module Action # Destroy all networks created for this specific domain. Skip # removing if network has still active connections. class DestroyNetworks def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::destroy_networks') @app = app end def call(env) if env[:machine].provider_config.qemu_use_session @app.call(env) return end # If there were some networks created for this machine, in machines # data directory, created_networks file holds UUIDs of each network. created_networks_file = env[:machine].data_dir + 'created_networks' @logger.info 'Checking if any networks were created' # If created_networks file doesn't exist, there are no networks we # need to remove. unless File.exist?(created_networks_file) env[:machine].id = nil return @app.call(env) end @logger.info 'File with created networks exists' # Iterate over each created network UUID and try to remove it. created_networks = [] file = File.open(created_networks_file, 'r') file.readlines.each do |network_uuid| @logger.info "Checking for #{network_uuid}" # lookup_network_by_uuid throws same exception # if there is an error or if the network just doesn't exist begin libvirt_network = env[:machine].provider.driver.connection.client.lookup_network_by_uuid( network_uuid ) rescue Libvirt::RetrieveError => e # this network is already destroyed, so move on if e.message =~ /Network not found/ @logger.info 'It is already undefined' next # some other error occured, so raise it again else raise e end end # Skip removing if network has still active connections. 
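              # libvirt reports attached guests via a 'connections' attribute on the
              # network element, e.g. (illustrative):
              #   <network connections='1'> ... </network>
              # Only networks without that attribute are destroyed and undefined below.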
xml = Nokogiri::XML(libvirt_network.xml_desc) connections = xml.xpath('/network/@connections').first unless connections.nil? @logger.info 'Still has connections so will not undefine' created_networks << network_uuid next end # Shutdown network first. # Undefine network. begin libvirt_network.destroy libvirt_network.undefine @logger.info 'Undefined it' rescue => e raise Errors::DestroyNetworkError, network_name: libvirt_network.name, error_message: e.message end end file.close # Update status of created networks after removing some/all of them. # Not sure why we are doing this, something else seems to always delete the file if !created_networks.empty? File.open(created_networks_file, 'w') do |file| @logger.info 'Writing new created_networks file' created_networks.each do |network_uuid| file.puts network_uuid end end else @logger.info 'Deleting created_networks file' File.delete(created_networks_file) end env[:machine].id = nil @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/set_boot_order.rb0000644000004100000410000000757713363570025025463 0ustar www-datawww-datarequire 'log4r' require 'nokogiri' module VagrantPlugins module ProviderLibvirt module Action # boot order useful for pxe in discovery workflow class SetBootOrder def initialize(app, env) @app = app @logger = Log4r::Logger.new('vagrant_libvirt::action::set_boot_order') config = env[:machine].provider_config @boot_order = config.boot_order end def call(env) # Get domain first begin domain = env[:machine].provider .driver .connection .client .lookup_domain_by_uuid( env[:machine].id.to_s ) rescue => e raise Errors::NoDomainError, error_message: e.message end # Only execute specific boot ordering if this is defined # in the Vagrant file if @boot_order.count >= 1 # If a domain is initially defined with no box or disk or # with an explicit boot order, libvirt adds # This conflicts with an explicit boot_order configuration, # so we need to remove it from the domain xml and feed it back. # Also see https://bugzilla.redhat.com/show_bug.cgi?id=1248514 # as to why we have to do this after all devices have been defined. xml = Nokogiri::XML(domain.xml_desc) xml.search('/domain/os/boot').each(&:remove) # Parse the XML and find each defined drive and network interfacee hd = xml.search("/domain/devices/disk[@device='disk']") cdrom = xml.search("/domain/devices/disk[@device='cdrom']") # implemented only for 1 network nets = @boot_order.flat_map do |x| x.class == Hash ? x : nil end.compact raise 'Defined only for 1 network for boot' if nets.size > 1 network = search_network(nets, xml) # Generate an array per device group and a flattened # array from all of those devices = { 'hd' => hd, 'cdrom' => cdrom, 'network' => network } final_boot_order = final_boot_order(@boot_order, devices) # Loop over the entire defined boot order array and # create boot order entries in the domain XML final_boot_order.each_with_index do |node, index| boot = "" node.add_child(boot) logger_msg(node, index) end # Finally redefine the domain XML through libvirt # to apply the boot ordering env[:machine].provider .driver .connection .client .define_domain_xml(xml.to_s) end @app.call(env) end def final_boot_order(boot_order, devices) boot_order.flat_map do |category| devices[category.class == Hash ? category.keys.first : category] end end def search_network(nets, xml) str = '/domain/devices/interface' str += "[(@type='network' or @type='udp' or @type='bridge')" unless nets.empty? 
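            # With a network requested in boot_order, the assembled XPath ends up
            # roughly like (network name illustrative):
            #   /domain/devices/interface[(@type='network' or @type='udp' or
            #     @type='bridge') and source[@network='vagrant-libvirt']]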
str += " and source[@network='#{nets.first['network']}']" end str += ']' @logger.debug(str) xml.search(str) end def logger_msg(node, index) name = if node.name == 'disk' node['device'] elsif node.name == 'interface' node.name end @logger.debug "Setting #{name} to boot index #{index + 1}" end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/handle_storage_pool.rb0000644000004100000410000000421313363570025026442 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action class HandleStoragePool include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::StorageUtil @@lock = Mutex.new def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::handle_storage_pool') @app = app end def call(env) # Get config options. config = env[:machine].provider_config # while inside the synchronize block take care not to call the next # action in the chain, as must exit this block first to prevent # locking all subsequent actions as well. @@lock.synchronize do # Check for storage pool, where box image should be created break if ProviderLibvirt::Util::Collection.find_matching( env[:machine].provider.driver.connection.pools.all, config.storage_pool_name ) @logger.info("No storage pool '#{config.storage_pool_name}' is available.") # If user specified other pool than default, don't create default # storage pool, just write error message. raise Errors::NoStoragePool if config.storage_pool_name != 'default' @logger.info("Creating storage pool 'default'") # Fog libvirt currently doesn't support creating pools. Use # ruby-libvirt client directly. begin @storage_pool_path = storage_pool_path(env) @storage_pool_uid = storage_uid(env) @storage_pool_gid = storage_gid(env) libvirt_pool = env[:machine].provider.driver.connection.client.define_storage_pool_xml( to_xml('default_storage_pool') ) libvirt_pool.build libvirt_pool.create rescue => e raise Errors::CreatingStoragePoolError, error_message: e.message end raise Errors::NoStoragePool unless libvirt_pool end @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/create_networks.rb0000644000004100000410000003447413363570025025645 0ustar www-datawww-datarequire 'log4r' require 'vagrant/util/network_ip' require 'vagrant/util/scoped_hash_override' require 'ipaddr' require 'thread' module VagrantPlugins module ProviderLibvirt module Action # Prepare all networks needed for domain connections. class CreateNetworks include Vagrant::Util::NetworkIP include Vagrant::Util::ScopedHashOverride include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::NetworkUtil @@lock = Mutex.new def initialize(app, env) mess = 'vagrant_libvirt::action::create_networks' @logger = Log4r::Logger.new(mess) @app = app @available_networks = [] @options = {} @libvirt_client = env[:machine].provider.driver.connection.client end def call(env) if env[:machine].provider_config.qemu_use_session @app.call(env) return end # only one vm at a time should try to set up networks # otherwise they'll have inconsitent views of current state # and conduct redundant operations that cause errors @@lock.synchronize do # Iterate over networks If some network is not # available, create it if possible. Otherwise raise an error. 
configured_networks(env, @logger).each do |options| # Only need to create private networks next if options[:iface_type] != :private_network || options.fetch(:tunnel_type, nil) @logger.debug "Searching for network with options #{options}" # should fix other methods so this doesn't have to be instance var @options = options # Get a list of all (active and inactive) libvirt networks. This # list is used throughout this class and should be easier to # process than libvirt API calls. @available_networks = libvirt_networks( env[:machine].provider.driver.connection.client ) # Prepare a hash describing network for this specific interface. @interface_network = { name: nil, ip_address: nil, netmask: @options[:netmask], network_address: nil, bridge_name: nil, domain_name: nil, ipv6_address: options[:ipv6_address] || nil, ipv6_prefix: options[:ipv6_prefix] || nil, created: false, active: false, autostart: options[:autostart] || false, guest_ipv6: @options[:guest_ipv6] || 'yes', libvirt_network: nil } if @options[:ip] handle_ip_option(env) elsif @options[:type].to_s == 'dhcp' handle_dhcp_private_network(env) elsif @options[:network_name] handle_network_name_option(env) else raise Errors::CreateNetworkError, error_message: @options end autostart_network if @interface_network[:autostart] activate_network unless @interface_network[:active] end end @app.call(env) end private def lookup_network_by_ip(ip) @logger.debug "looking up network with ip == #{ip}" @available_networks.find { |network| network[:network_address] == ip } end # Return hash of network for specified name, or nil if not found. def lookup_network_by_name(network_name) @logger.debug "looking up network named #{network_name}" @available_networks.find { |network| network[:name] == network_name } end # Return hash of network for specified bridge, or nil if not found. def lookup_bridge_by_name(bridge_name) @logger.debug "looking up bridge named #{bridge_name}" @available_networks.find { |network| network[:bridge_name] == bridge_name } end # Throw an error if dhcp setting for an existing network does not # match what was configured in the vagrantfile # since we always enable dhcp for the management network # this ensures we wont start a vm vagrant cant reach # Allow the situation where DHCP is not requested (:libvirt__dhcp_enabled == false) # but where it is enabled on the virtual network def verify_dhcp if @interface_network[:dhcp_enabled] == true && @options[:dhcp_enabled] == false raise Errors::DHCPMismatch, network_name: @interface_network[:name], requested: @options[:dhcp_enabled] ? 'enabled' : 'disabled' end end # Handle only situations, when ip is specified. Variables @options and # @available_networks should be filled before calling this function. def handle_ip_option(env) return unless @options[:ip] net_address = nil unless @options[:forward_mode] == 'veryisolated' net_address = network_address(@options[:ip], @options[:netmask]) # Set IP address of network (actually bridge). It will be used as # gateway address for machines connected to this network. @interface_network[:ip_address] = get_host_ip_addr(net_address) end @interface_network[:network_address] = net_address # if network is veryisolated, search by name network = if @options[:libvirt__forward_mode] == 'veryisolated' lookup_network_by_name(@options[:network_name]) elsif net_address # otherwise, search by ip (if set) lookup_network_by_ip(net_address) else # leaving this here to mimic prior behavior. If we get # here, something's probably broken. 
lookup_network_by_name(@options[:network_name]) end @interface_network = network if network verify_dhcp if @interface_network[:created] if @options[:network_name] @logger.debug 'Checking that network name does not clash with ip' if @interface_network[:created] # Just check for mismatch error here - if name and ip from # config match together. if @options[:network_name] != @interface_network[:name] raise Errors::NetworkNameAndAddressMismatch, ip_address: @options[:ip], network_name: @options[:network_name] end else # Network is not created, but name is set. We need to check, # whether network name from config doesn't already exist. if lookup_network_by_name @options[:network_name] raise Errors::NetworkNameAndAddressMismatch, ip_address: @options[:ip], network_name: @options[:network_name] end # Network with 'name' doesn't exist. Set it as name for new # network. @interface_network[:name] = @options[:network_name] end end # Do we need to create new network? unless @interface_network[:created] # TODO: stop after some loops. Don't create infinite loops. # Is name for new network set? If not, generate a unique one. count = 0 while @interface_network[:name].nil? @logger.debug 'generating name for network' # Generate a network name. network_name = env[:root_path].basename.to_s.dup network_name << count.to_s count += 1 # Check if network name is unique. next if lookup_network_by_name(network_name) @interface_network[:name] = network_name end # Generate a unique name for network bridge. @interface_network[:bridge_name] = generate_bridge_name # Create a private network. create_private_network(env) end end # Handle network_name option, if ip was not specified. Variables # @options and @available_networks should be filled before calling this # function. def handle_network_name_option(env) return if @options[:ip] || \ !@options[:network_name] || \ !@options[:libvirt__forward_mode] == 'veryisolated' network = lookup_network_by_name(@options[:network_name]) @interface_network = network if network if @options[:libvirt__forward_mode] == 'veryisolated' # if this interface has a network address, something's wrong. if @interface_network[:network_address] raise Errors::NetworkNotAvailableError, network_name: @options[:network_name] end else if !@interface_network raise Errors::NetworkNotAvailableError, network_name: @options[:network_name] else verify_dhcp end end # Do we need to create new network? unless @interface_network[:created] @interface_network[:name] = @options[:network_name] @interface_network[:ip_address] ||= @options[:host_ip] # Generate a unique name for network bridge. @interface_network[:bridge_name] = generate_bridge_name # Create a private network. create_private_network(env) end end def handle_dhcp_private_network(env) net_address = @options[:libvirt__network_address] net_address = '172.28.128.0' unless net_address network = lookup_network_by_ip(net_address) @interface_network = network if network # Do we need to create new network? unless @interface_network[:created] @interface_network[:name] = 'vagrant-private-dhcp' @interface_network[:network_address] = net_address # Set IP address of network (actually bridge). It will be used as # gateway address for machines connected to this network. @interface_network[:ip_address] = get_host_ip_addr(net_address) # Generate a unique name for network bridge. @interface_network[:bridge_name] = generate_bridge_name # Create a private network. 
create_private_network(env) end end # Return provided address or first address of network otherwise def get_host_ip_addr(network) @options[:host_ip] ? IPAddr.new(@options[:host_ip]) : IPAddr.new(network).succ end # Return the first available virbr interface name def generate_bridge_name @logger.debug 'generating name for bridge' count = 0 while lookup_bridge_by_name(bridge_name = "virbr#{count}") count += 1 end @logger.debug "found available bridge name #{bridge_name}" bridge_name end def create_private_network(env) @network_name = @interface_network[:name] @network_bridge_name = @interface_network[:bridge_name] @network_address = @interface_network[:ip_address] @network_netmask = @interface_network[:netmask] @network_mtu = Integer(@options[:mtu]) if @options[:mtu] @guest_ipv6 = @interface_network[:guest_ipv6] @network_ipv6_address = @interface_network[:ipv6_address] @network_ipv6_prefix = @interface_network[:ipv6_prefix] @network_forward_mode = @options[:forward_mode] if @options[:forward_device] @network_forward_device = @options[:forward_device] end if @options[:dhcp_enabled] # Find out DHCP addresses pool range. network_address = "#{@interface_network[:network_address]}/" network_address << (@interface_network[:netmask]).to_s net = @interface_network[:network_address] ? IPAddr.new(network_address) : nil # First is address of network, second is gateway (by default). # So start the range two addresses after network address by default. # TODO: Detect if this IP is not set on the interface. start_address = @options[:dhcp_start] || net.to_range.begin.succ # Default to last possible address. (Stop address must not be broadcast address.) stop_address = @options[:dhcp_stop] || (net.to_range.end & IPAddr.new('255.255.255.254')) @network_dhcp_enabled = true @network_dhcp_bootp_file = @options[:dhcp_bootp_file] @network_dhcp_bootp_server = @options[:dhcp_bootp_server] @network_range_start = start_address @network_range_stop = stop_address else @network_dhcp_enabled = false end @network_domain_name = @options[:domain_name] begin @interface_network[:libvirt_network] = \ @libvirt_client.define_network_xml(to_xml('private_network')) @logger.debug 'created network' rescue => e raise Errors::CreateNetworkError, error_message: e.message end created_networks_file = env[:machine].data_dir + 'created_networks' message = 'Saving information about created network ' message << "#{@interface_network[:name]}, " message << "UUID=#{@interface_network[:libvirt_network].uuid} " message << "to file #{created_networks_file}." 
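        # The UUID is persisted so DestroyNetworks can later undefine only the
        # networks this plugin created, leaving pre-existing libvirt networks untouched.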
@logger.info(message) File.open(created_networks_file, 'a') do |file| file.puts @interface_network[:libvirt_network].uuid end end def autostart_network @interface_network[:libvirt_network].autostart = true rescue => e raise Errors::AutostartNetworkError, error_message: e.message end def activate_network @interface_network[:libvirt_network].create rescue => e raise Errors::ActivateNetworkError, error_message: e.message end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/message_already_created.rb0000644000004100000410000000050113363570025027242 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action class MessageAlreadyCreated def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.already_created')) @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/prepare_nfs_settings.rb0000644000004100000410000000532013363570025026656 0ustar www-datawww-datarequire 'nokogiri' require 'socket' require 'timeout' module VagrantPlugins module ProviderLibvirt module Action class PrepareNFSSettings include Vagrant::Action::Builtin::MixinSyncedFolders def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant::action::vm::nfs') end def call(env) @machine = env[:machine] @app.call(env) if using_nfs? @logger.info('Using NFS, preparing NFS settings by reading host IP and machine IP') env[:nfs_machine_ip] = read_machine_ip(env[:machine]) env[:nfs_host_ip] = read_host_ip(env[:nfs_machine_ip]) @logger.info("host IP: #{env[:nfs_host_ip]} machine IP: #{env[:nfs_machine_ip]}") raise Vagrant::Errors::NFSNoHostonlyNetwork if !env[:nfs_machine_ip] || !env[:nfs_host_ip] end end # We're using NFS if we have any synced folder with NFS configured. If # we are not using NFS we don't need to do the extra work to # populate these fields in the environment. def using_nfs? 
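        # synced_folders groups the machine's folders by type; the double
        # negation turns the presence (or absence) of any :nfs entries into a
        # plain true/false value.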
!!synced_folders(@machine)[:nfs] end # Returns the IP address of the host # # @param [Machine] machine # @return [String] def read_host_ip(ip) UDPSocket.open do |s| if ip.is_a?(Array) s.connect(ip.last, 1) else s.connect(ip, 1) end s.addr.last end end # Returns the IP address of the guest # # @param [Machine] machine # @return [String] def read_machine_ip(machine) # check host only ip ssh_host = machine.ssh_info[:host] return ssh_host if ping(ssh_host) # check other ips command = "ip=$(which ip); ${ip:-/sbin/ip} addr show | grep -i 'inet ' | grep -v '127.0.0.1' | tr -s ' ' | cut -d' ' -f3 | cut -d'/' -f 1" result = '' machine.communicate.execute(command) do |type, data| result << data if type == :stdout end ips = result.chomp.split("\n").uniq @logger.info("guest IPs: #{ips.join(', ')}") ips.each do |ip| next if ip == ssh_host return ip if ping(ip) end end private # Check if we can open a connection to the host def ping(host, timeout = 3) ::Timeout.timeout(timeout) do s = TCPSocket.new(host, 'echo') s.close end true rescue Errno::ECONNREFUSED true rescue Timeout::Error, StandardError false end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/message_will_not_destroy.rb0000644000004100000410000000060113363570025027533 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action class MessageWillNotDestroy def initialize(app, env) @app = app end def call(env) env[:ui].info I18n.t("vagrant.commands.destroy.will_not_destroy", name: env[:machine].name) @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/message_not_suspended.rb0000644000004100000410000000047513363570025027016 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action class MessageNotSuspended def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.not_suspended')) @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/package_domain.rb0000644000004100000410000001000313363570025025346 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Action for create new box for libvirt provider class PackageDomain def initialize(app, env) @logger = Log4r::Logger.new('vagrant_libvirt::action::package_domain') @app = app env['package.files'] ||= {} env['package.output'] ||= 'package.box' end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.package_domain')) libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid( env[:machine].id ) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) root_disk = domain.volumes.select do |x| x.name == libvirt_domain.name + '.img' end.first boxname = env['package.output'] raise "#{boxname}: Already exists" if File.exist?(boxname) @tmp_dir = Dir.pwd + '/_tmp_package' @tmp_img = @tmp_dir + '/box.img' Dir.mkdir(@tmp_dir) if File.readable?(root_disk.path) backing = `qemu-img info "#{root_disk.path}" | grep 'backing file:' | cut -d ':' -f2`.chomp else env[:ui].error("Require set read access to #{root_disk.path}. 
sudo chmod a+r #{root_disk.path}") FileUtils.rm_rf(@tmp_dir) raise 'Have no access' end env[:ui].info('Image has backing image, copying image and rebasing ...') FileUtils.cp(root_disk.path, @tmp_img) `qemu-img rebase -p -b "" #{@tmp_img}` # remove hw association with interface # working for centos with lvs default disks `virt-sysprep --no-logfile --operations defaults,-ssh-userdir -a #{@tmp_img}` # add any user provided file extra = '' @tmp_include = @tmp_dir + '/_include' if env['package.include'] extra = './_include' Dir.mkdir(@tmp_include) env['package.include'].each do |f| env[:ui].info("Including user file: #{f}") FileUtils.cp(f, @tmp_include) end end if env['package.vagrantfile'] extra = './_include' Dir.mkdir(@tmp_include) unless File.directory?(@tmp_include) env[:ui].info('Including user Vagrantfile') FileUtils.cp(env['package.vagrantfile'], @tmp_include + '/Vagrantfile') end Dir.chdir(@tmp_dir) info = JSON.parse(`qemu-img info --output=json #{@tmp_img}`) img_size = (Float(info['virtual-size'])/(1024**3)).ceil File.write(@tmp_dir + '/metadata.json', metadata_content(img_size)) File.write(@tmp_dir + '/Vagrantfile', vagrantfile_content) assemble_box(boxname, extra) FileUtils.mv(@tmp_dir + '/' + boxname, '../' + boxname) FileUtils.rm_rf(@tmp_dir) env[:ui].info('Box created') env[:ui].info('You can now add the box:') env[:ui].info("vagrant box add #{boxname} --name any_comfortable_name") @app.call(env) end def assemble_box(boxname, extra) `tar cvzf "#{boxname}" --totals ./metadata.json ./Vagrantfile ./box.img #{extra}` end def vagrantfile_content <<-EOF Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.driver = "kvm" libvirt.host = "" libvirt.connect_via_ssh = false libvirt.storage_pool_name = "default" end end user_vagrantfile = File.expand_path('../_include/Vagrantfile', __FILE__) load user_vagrantfile if File.exists?(user_vagrantfile) EOF end def metadata_content(filesize) <<-EOF { "provider": "libvirt", "format": "qcow2", "virtual_size": #{filesize} } EOF end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/set_name_of_domain.rb0000644000004100000410000000416613363570025026247 0ustar www-datawww-datarequire 'securerandom' module VagrantPlugins module ProviderLibvirt module Action # Setup name for domain and domain volumes. class SetNameOfDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::set_name_of_domain') @app = app end def call(env) env[:domain_name] = build_domain_name(env) begin @logger.info("Looking for domain #{env[:domain_name]} through list " \ "#{env[:machine].provider.driver.connection.servers.all}") # Check if the domain name is not already taken domain = ProviderLibvirt::Util::Collection.find_matching( env[:machine].provider.driver.connection.servers.all, env[:domain_name] ) rescue Fog::Errors::Error => e @logger.info(e.to_s) domain = nil end @logger.info("Looking for domain #{env[:domain_name]}") unless domain.nil? raise ProviderLibvirt::Errors::DomainNameExists, domain_name: env[:domain_name] end @app.call(env) end # build domain name # random_hostname option avoids # `domain about to create is already taken` # parsable and sortable by epoch time # @example # development-centos-6-chef-11_1404488971_3b7a569e2fd7c554b852 # @return [String] libvirt domain name def build_domain_name(env) config = env[:machine].provider_config domain_name = if config.default_prefix.nil? env[:root_path].basename.to_s.dup.concat('_') elsif config.default_prefix.empty? 
# don't have any prefix, not even "_" '' else config.default_prefix.to_s.dup.concat('_') end domain_name << env[:machine].name.to_s domain_name.gsub!(/[^-a-z0-9_\.]/i, '') domain_name << "_#{Time.now.utc.to_i}_#{SecureRandom.hex(10)}" if config.random_hostname domain_name end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/read_mac_addresses.rb0000644000004100000410000000210113363570025026214 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action class ReadMacAddresses def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant_libvirt::action::read_mac_addresses') end def call(env) env[:machine_mac_addresses] = read_mac_addresses(env[:machine].provider.driver.connection, env[:machine]) end def read_mac_addresses(libvirt, machine) return nil if machine.id.nil? domain = libvirt.client.lookup_domain_by_uuid(machine.id) if domain.nil? @logger.info('Machine could not be found, assuming it got destroyed') machine.id = nil return nil end xml = Nokogiri::XML(domain.xml_desc) mac = xml.xpath('/domain/devices/interface/mac/@address') return {} if mac.empty? Hash[mac.each_with_index.map do |x, i| @logger.debug("interface[#{i}] = #{x.value}") [i, x.value] end] end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/resume_domain.rb0000644000004100000410000000167213363570025025267 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Resume suspended domain. class ResumeDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::resume_domain') @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.resuming_domain')) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) config = env[:machine].provider_config if config.suspend_mode == 'managedsave' domain.start else domain.resume end @logger.info("Machine #{env[:machine].id} is resumed.") @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/is_running.rb0000644000004100000410000000106613363570025024610 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action # This can be used with "Call" built-in to check if the machine # is running and branch in the middleware. class IsRunning def initialize(app, _env) @app = app end def call(env) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? env[:result] = domain.state.to_s == 'running' @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/suspend_domain.rb0000644000004100000410000000236713363570025025452 0ustar www-datawww-datarequire 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Suspend domain. class SuspendDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::suspend_domain') @app = app end # make pause def call(env) env[:ui].info(I18n.t('vagrant_libvirt.suspending_domain')) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? 
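        # The suspend behaviour is chosen via the provider config, e.g.
        # (illustrative Vagrantfile snippet, not part of this file):
        #   config.vm.provider :libvirt do |libvirt|
        #     libvirt.suspend_mode = 'managedsave'
        #   end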
config = env[:machine].provider_config if config.suspend_mode == 'managedsave' libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) begin libvirt_domain.managed_save rescue => e env[:ui].error("Error doing a managed save for domain. It may have entered a paused state. Check the output of `virsh managedsave DOMAIN_NAME --verbose` on the VM host, error: #{e.message}") end else domain.suspend end @logger.info("Machine #{env[:machine].id} is suspended ") @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/remove_stale_volume.rb0000644000004100000410000000325713363570025026515 0ustar www-datawww-datarequire 'log4r' # require 'log4r/yamlconfigurator' module VagrantPlugins module ProviderLibvirt module Action class RemoveStaleVolume def initialize(app, _env) # log4r_config= YAML.load_file(File.join(File.dirname(__FILE__),"log4r.yaml")) # log_cfg = Log4r::YamlConfigurator # log_cfg.decode_yaml( log4r_config['log4r_config'] ) @logger = Log4r::Logger.new('vagrant_libvirt::action::remove_stale_volume') @app = app end def call(env) # Remove stale server volume env[:ui].info(I18n.t('vagrant_libvirt.remove_stale_volume')) config = env[:machine].provider_config # Check for storage pool, where box image should be created fog_pool = ProviderLibvirt::Util::Collection.find_matching( env[:machine].provider.driver.connection.pools.all, config.storage_pool_name ) @logger.debug("**** Pool #{fog_pool.name}") # This is name of newly created image for vm. name = "#{env[:domain_name]}.img" @logger.debug("**** Volume name #{name}") # remove root storage box_volume = ProviderLibvirt::Util::Collection.find_matching( env[:machine].provider.driver.connection.volumes.all, name ) if box_volume && box_volume.pool_name == fog_pool.name @logger.info("Deleting volume #{box_volume.key}") box_volume.destroy env[:result] = box_volume else env[:result] = nil end # Continue the middleware chain. @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/prune_nfs_exports.rb0000644000004100000410000000117513363570025026221 0ustar www-datawww-datarequire 'yaml' module VagrantPlugins module ProviderLibvirt module Action class PruneNFSExports def initialize(app, _env) @app = app end def call(env) if env[:host] uuid = env[:machine].id # get all uuids uuids = env[:machine].provider.driver.connection.servers.all.map(&:id) # not exiisted in array will removed from nfs uuids.delete(uuid) env[:host].capability( :nfs_prune, env[:machine].ui, uuids ) end @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/share_folders.rb0000644000004100000410000000370313363570025025255 0ustar www-datawww-datarequire 'pathname' require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class ShareFolders def initialize(app, _env) @logger = Log4r::Logger.new('vagrant::action::vm::share_folders') @app = app end def call(env) @env = env prepare_folders create_metadata @app.call(env) end # This method returns an actual list of shared # folders to create and their proper path. def shared_folders {}.tap do |result| @env[:machine].config.vm.synced_folders.each do |id, data| # Ignore NFS shared folders next if !data[:type] == :nfs # This to prevent overwriting the actual shared folders data result[id] = data.dup end end end # Prepares the shared folders by verifying they exist and creating them # if they don't. 
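      # A folder is only created here when it was declared with `create: true`,
      # e.g. (illustrative example):
      #   config.vm.synced_folder "./data", "/vagrant_data", create: true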
def prepare_folders shared_folders.each do |_id, options| hostpath = Pathname.new(options[:hostpath]).expand_path(@env[:root_path]) next unless !hostpath.directory? && options[:create] # Host path doesn't exist, so let's create it. @logger.debug("Host path doesn't exist, creating: #{hostpath}") begin hostpath.mkpath rescue Errno::EACCES raise Vagrant::Errors::SharedFolderCreateFailed, path: hostpath.to_s end end end def create_metadata @env[:ui].info I18n.t('vagrant.actions.vm.share_folders.creating') folders = [] shared_folders.each do |id, data| folders << { name: id, hostpath: File.expand_path(data[:hostpath], @env[:root_path]), transient: data[:transient] } end end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action/message_not_running.rb0000644000004100000410000000047113363570025026500 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Action class MessageNotRunning def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.not_running')) @app.call(env) end end end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/util.rb0000644000004100000410000000070613363570025022135 0ustar www-datawww-datamodule VagrantPlugins module ProviderLibvirt module Util autoload :ErbTemplate, 'vagrant-libvirt/util/erb_template' autoload :Collection, 'vagrant-libvirt/util/collection' autoload :Timer, 'vagrant-libvirt/util/timer' autoload :NetworkUtil, 'vagrant-libvirt/util/network_util' autoload :StorageUtil, 'vagrant-libvirt/util/storage_util' autoload :ErrorCodes, 'vagrant-libvirt/util/error_codes' end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/action.rb0000644000004100000410000003007413363570025022436 0ustar www-datawww-datarequire 'vagrant/action/builder' require 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Include the built-in modules so we can use them as top-level things. include Vagrant::Action::Builtin @logger = Log4r::Logger.new('vagrant_libvirt::action') # remove image from libvirt storage pool def self.remove_libvirt_image Vagrant::Action::Builder.new.tap do |b| b.use RemoveLibvirtImage end end # This action is called to bring the box up from nothing. def self.action_up Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use BoxCheckOutdated b.use Call, IsCreated do |env, b2| # Create VM if not yet created. if !env[:result] b2.use SetNameOfDomain if !env[:machine].config.vm.box b2.use CreateDomain b2.use CreateNetworks b2.use CreateNetworkInterfaces b2.use SetBootOrder b2.use StartDomain else b2.use HandleStoragePool b2.use HandleBox b2.use HandleBoxImage b2.use CreateDomainVolume b2.use CreateDomain b2.use Provision b2.use PrepareNFSValidIds b2.use SyncedFolderCleanup b2.use SyncedFolders b2.use PrepareNFSSettings b2.use ShareFolders b2.use CreateNetworks b2.use CreateNetworkInterfaces b2.use SetBootOrder b2.use StartDomain b2.use WaitTillUp b2.use ForwardPorts b2.use SetHostname # b2.use SyncFolders end else env[:halt_on_error] = true b2.use action_start end end end end # Assuming VM is created, just start it. This action is not called # directly by any subcommand. VM can be suspended, already running or in # poweroff state. 
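      # Note: this chain is also reused by action_up (once the domain exists)
      # and by action_reload, which runs action_halt followed by action_start.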
def self.action_start Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsRunning do |env, b2| # If the VM is running, run the necessary provisioners if env[:result] b2.use action_provision next end b2.use Call, IsSuspended do |env2, b3| # if vm is suspended resume it then exit if env2[:result] b3.use CreateNetworks b3.use ResumeDomain next end if !env[:machine].config.vm.box # With no box, we just care about network creation and starting it b3.use CreateNetworks b3.use SetBootOrder b3.use StartDomain else # VM is not running or suspended. b3.use Provision # Ensure networks are created and active b3.use CreateNetworks b3.use SetBootOrder b3.use PrepareNFSValidIds b3.use SyncedFolderCleanup b3.use SyncedFolders # Start it.. b3.use StartDomain # Machine should gain IP address when comming up, # so wait for dhcp lease and store IP into machines data_dir. b3.use WaitTillUp b3.use ForwardPorts b3.use PrepareNFSSettings b3.use ShareFolders end end end end end # This is the action that is primarily responsible for halting the # virtual machine. def self.action_halt Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use ClearForwardedPorts b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsSuspended do |env2, b3| b3.use CreateNetworks if env2[:result] b3.use ResumeDomain if env2[:result] end b2.use Call, IsRunning do |env2, b3| next unless env2[:result] # VM is running, halt it. b3.use HaltDomain end end end end # This is the action implements the reload command # It uses the halt and start actions def self.action_reload Vagrant::Action::Builder.new.tap do |b| b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use ConfigValidate b2.use action_halt b2.use action_start end end end # not implemented and looks like not require def self.action_package Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use PackageDomain end end # This is the action that is primarily responsible for completely # freeing the resources of the underlying virtual machine. def self.action_destroy Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] # Try to remove stale volumes anyway b2.use SetNameOfDomain b2.use RemoveStaleVolume if env[:machine].config.vm.box b2.use MessageNotCreated unless env[:result] next end b2.use Call, DestroyConfirm do |env2, b3| if env2[:result] b3.use ClearForwardedPorts # b3.use PruneNFSExports b3.use DestroyDomain b3.use DestroyNetworks b3.use ProvisionerCleanup else b3.use MessageWillNotDestroy end end end end end # This action is called to SSH into the machine. def self.action_ssh Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsRunning do |env2, b3| unless env2[:result] b3.use MessageNotRunning next end b3.use SSHExec end end end end # This action is called when `vagrant provision` is called. def self.action_provision Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsRunning do |env2, b3| unless env2[:result] b3.use MessageNotRunning next end b3.use Provision # b3.use SyncFolders end end end end # This is the action that is primarily responsible for suspending # the virtual machine. 
def self.action_suspend Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsRunning do |env2, b3| unless env2[:result] b3.use MessageNotRunning next end b3.use SuspendDomain end end end end # This is the action that is primarily responsible for resuming # suspended machines. def self.action_resume Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsSuspended do |env2, b3| unless env2[:result] b3.use MessageNotSuspended next end b3.use CreateNetworks b3.use ResumeDomain end end end end def self.action_read_mac_addresses Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use ReadMacAddresses end end # This is the action that will run a single SSH command. def self.action_ssh_run Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsRunning do |env2, b3| unless env2[:result] b3.use MessageNotRunning next end b3.use SSHRun end end end end action_root = Pathname.new(File.expand_path('../action', __FILE__)) autoload :PackageDomain, action_root.join('package_domain') autoload :CreateDomain, action_root.join('create_domain') autoload :CreateDomainVolume, action_root.join('create_domain_volume') autoload :CreateNetworkInterfaces, action_root.join('create_network_interfaces') autoload :CreateNetworks, action_root.join('create_networks') autoload :DestroyDomain, action_root.join('destroy_domain') autoload :DestroyNetworks, action_root.join('destroy_networks') autoload :ForwardPorts, action_root.join('forward_ports') autoload :ClearForwardedPorts, action_root.join('forward_ports') autoload :HaltDomain, action_root.join('halt_domain') autoload :HandleBoxImage, action_root.join('handle_box_image') autoload :HandleStoragePool, action_root.join('handle_storage_pool') autoload :RemoveLibvirtImage, action_root.join('remove_libvirt_image') autoload :IsCreated, action_root.join('is_created') autoload :IsRunning, action_root.join('is_running') autoload :IsSuspended, action_root.join('is_suspended') autoload :MessageAlreadyCreated, action_root.join('message_already_created') autoload :MessageNotCreated, action_root.join('message_not_created') autoload :MessageNotRunning, action_root.join('message_not_running') autoload :MessageNotSuspended, action_root.join('message_not_suspended') autoload :MessageWillNotDestroy, action_root.join('message_will_not_destroy') autoload :RemoveStaleVolume, action_root.join('remove_stale_volume') autoload :PrepareNFSSettings, action_root.join('prepare_nfs_settings') autoload :PrepareNFSValidIds, action_root.join('prepare_nfs_valid_ids') autoload :PruneNFSExports, action_root.join('prune_nfs_exports') autoload :ReadMacAddresses, action_root.join('read_mac_addresses') autoload :ResumeDomain, action_root.join('resume_domain') autoload :SetNameOfDomain, action_root.join('set_name_of_domain') autoload :SetBootOrder, action_root.join('set_boot_order') # I don't think we need it anymore autoload :ShareFolders, action_root.join('share_folders') autoload :StartDomain, action_root.join('start_domain') autoload :SuspendDomain, action_root.join('suspend_domain') autoload :TimedProvision, action_root.join('timed_provision') autoload :WaitTillUp, action_root.join('wait_till_up') autoload :PrepareNFSValidIds, action_root.join('prepare_nfs_valid_ids') 
autoload :SSHRun, 'vagrant/action/builtin/ssh_run' autoload :HandleBox, 'vagrant/action/builtin/handle_box' autoload :SyncedFolders, 'vagrant/action/builtin/synced_folders' autoload :SyncedFolderCleanup, 'vagrant/action/builtin/synced_folder_cleanup' autoload :ProvisionerCleanup, 'vagrant/action/builtin/provisioner_cleanup' end end end vagrant-libvirt-0.0.45/lib/vagrant-libvirt/config.rb0000644000004100000410000006401613363570025022431 0ustar www-datawww-datarequire 'vagrant' class Numeric Alphabet = ('a'..'z').to_a def vdev s = '' q = self (q, r = (q - 1).divmod(26)) && s.prepend(Alphabet[r]) until q.zero? 'vd' + s end end module VagrantPlugins module ProviderLibvirt class Config < Vagrant.plugin('2', :config) # manually specify URI # will supercede most other options if provided attr_accessor :uri # A hypervisor name to access via Libvirt. attr_accessor :driver # The name of the server, where libvirtd is running. attr_accessor :host # If use ssh tunnel to connect to Libvirt. attr_accessor :connect_via_ssh # Path towards the libvirt socket attr_accessor :socket # The username to access Libvirt. attr_accessor :username # Password for Libvirt connection. attr_accessor :password # ID SSH key file attr_accessor :id_ssh_key_file # Libvirt storage pool name, where box image and instance snapshots will # be stored. attr_accessor :storage_pool_name attr_accessor :storage_pool_path # Turn on to prevent hostname conflicts attr_accessor :random_hostname # Libvirt default network attr_accessor :management_network_device attr_accessor :management_network_name attr_accessor :management_network_address attr_accessor :management_network_mode attr_accessor :management_network_mac attr_accessor :management_network_guest_ipv6 attr_accessor :management_network_autostart attr_accessor :management_network_pci_bus attr_accessor :management_network_pci_slot # System connection information attr_accessor :system_uri # Default host prefix (alternative to use project folder name) attr_accessor :default_prefix # Domain specific settings used while creating new domain. attr_accessor :uuid attr_accessor :memory attr_accessor :memory_backing attr_accessor :channel attr_accessor :cpus attr_accessor :cpu_mode attr_accessor :cpu_model attr_accessor :cpu_fallback attr_accessor :cpu_features attr_accessor :cpu_topology attr_accessor :features attr_accessor :features_hyperv attr_accessor :numa_nodes attr_accessor :loader attr_accessor :nvram attr_accessor :boot_order attr_accessor :machine_type attr_accessor :machine_arch attr_accessor :machine_virtual_size attr_accessor :disk_bus attr_accessor :disk_device attr_accessor :nic_model_type attr_accessor :nested attr_accessor :volume_cache attr_accessor :kernel attr_accessor :cmd_line attr_accessor :initrd attr_accessor :dtb attr_accessor :emulator_path attr_accessor :graphics_type attr_accessor :graphics_autoport attr_accessor :graphics_port attr_accessor :graphics_passwd attr_accessor :graphics_ip attr_accessor :video_type attr_accessor :video_vram attr_accessor :keymap attr_accessor :kvm_hidden attr_accessor :sound_type # Sets the information for connecting to a host TPM device # Only supports socket-based TPMs attr_accessor :tpm_model attr_accessor :tpm_type attr_accessor :tpm_path # Sets the max number of NICs that can be created # Default set to 8. 
Don't change the default unless you know # what are doing attr_accessor :nic_adapter_count # Storage attr_accessor :disks attr_accessor :cdroms # Inputs attr_accessor :inputs # Channels attr_accessor :channels # PCI device passthrough attr_accessor :pcis # Random number device passthrough attr_accessor :rng # Watchdog device attr_accessor :watchdog_dev # USB controller attr_accessor :usbctl_dev # USB device passthrough attr_accessor :usbs # Redirected devices attr_accessor :redirdevs attr_accessor :redirfilters # smartcard device attr_accessor :smartcard_dev # Suspend mode attr_accessor :suspend_mode # Autostart attr_accessor :autostart # Attach mgmt network attr_accessor :mgmt_attach # Additional qemuargs arguments attr_accessor :qemu_args # Use qemu session instead of system attr_accessor :qemu_use_session def initialize @uri = UNSET_VALUE @driver = UNSET_VALUE @host = UNSET_VALUE @connect_via_ssh = UNSET_VALUE @username = UNSET_VALUE @password = UNSET_VALUE @id_ssh_key_file = UNSET_VALUE @storage_pool_name = UNSET_VALUE @random_hostname = UNSET_VALUE @management_network_device = UNSET_VALUE @management_network_name = UNSET_VALUE @management_network_address = UNSET_VALUE @management_network_mode = UNSET_VALUE @management_network_mac = UNSET_VALUE @management_network_guest_ipv6 = UNSET_VALUE @management_network_autostart = UNSET_VALUE @management_network_pci_slot = UNSET_VALUE @management_network_pci_bus = UNSET_VALUE # System connection information @system_uri = UNSET_VALUE # Domain specific settings. @uuid = UNSET_VALUE @memory = UNSET_VALUE @memory_backing = UNSET_VALUE @cpus = UNSET_VALUE @cpu_mode = UNSET_VALUE @cpu_model = UNSET_VALUE @cpu_fallback = UNSET_VALUE @cpu_features = UNSET_VALUE @cpu_topology = UNSET_VALUE @features = UNSET_VALUE @features_hyperv = UNSET_VALUE @numa_nodes = UNSET_VALUE @loader = UNSET_VALUE @nvram = UNSET_VALUE @machine_type = UNSET_VALUE @machine_arch = UNSET_VALUE @machine_virtual_size = UNSET_VALUE @disk_bus = UNSET_VALUE @disk_device = UNSET_VALUE @nic_model_type = UNSET_VALUE @nested = UNSET_VALUE @volume_cache = UNSET_VALUE @kernel = UNSET_VALUE @initrd = UNSET_VALUE @dtb = UNSET_VALUE @cmd_line = UNSET_VALUE @emulator_path = UNSET_VALUE @graphics_type = UNSET_VALUE @graphics_autoport = UNSET_VALUE @graphics_port = UNSET_VALUE @graphics_ip = UNSET_VALUE @graphics_passwd = UNSET_VALUE @video_type = UNSET_VALUE @video_vram = UNSET_VALUE @sound_type = UNSET_VALUE @keymap = UNSET_VALUE @kvm_hidden = UNSET_VALUE @tpm_model = UNSET_VALUE @tpm_type = UNSET_VALUE @tpm_path = UNSET_VALUE @nic_adapter_count = UNSET_VALUE # Boot order @boot_order = [] # Storage @disks = [] @cdroms = [] # Inputs @inputs = UNSET_VALUE # Channels @channels = UNSET_VALUE # PCI device passthrough @pcis = UNSET_VALUE # Random number device passthrough @rng = UNSET_VALUE # Watchdog device @watchdog_dev = UNSET_VALUE # USB controller @usbctl_dev = UNSET_VALUE # USB device passthrough @usbs = UNSET_VALUE # Redirected devices @redirdevs = UNSET_VALUE @redirfilters = UNSET_VALUE # smartcard device @smartcard_dev = UNSET_VALUE # Suspend mode @suspend_mode = UNSET_VALUE # Autostart @autostart = UNSET_VALUE # Attach mgmt network @mgmt_attach = UNSET_VALUE @qemu_args = [] @qemu_use_session = UNSET_VALUE end def boot(device) @boot_order << device # append end def _get_device(disks) # skip existing devices and also the first one (vda) exist = disks.collect { |x| x[:device] } + [1.vdev.to_s] skip = 1 # we're 1 based, not 0 based... 
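        # Numeric#vdev (defined at the top of this file) maps an index to a
        # lettered device name: 1 => "vda", 2 => "vdb", ... 26 => "vdz", 27 => "vdaa".
        # The loop below returns the first such name not already in use.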
loop do dev = skip.vdev # get lettered device return dev unless exist.include?(dev) skip += 1 end end def _get_cdrom_dev(cdroms) exist = Hash[cdroms.collect { |x| [x[:dev], true] }] # hda - hdc curr = 'a'.ord while curr <= 'd'.ord dev = 'hd' + curr.chr if exist[dev] curr += 1 next else return dev end end # is it better to raise our own error, or let libvirt cause the exception? raise 'Only four cdroms may be attached at a time' end def _generate_numa @numa_nodes.collect { |x| # Perform some validation of cpu values unless x[:cpus] =~ /^\d+-\d+$/ raise 'numa_nodes[:cpus] must be in format "integer-integer"' end # Convert to KiB x[:memory] = x[:memory].to_i * 1024 } # Grab the value of the last @numa_nodes[:cpus] and verify @cpus matches # Note: [:cpus] is zero based and @cpus is not, so we need to +1 last_cpu = @numa_nodes.last[:cpus] last_cpu = last_cpu.scan(/\d+$/)[0] last_cpu = last_cpu.to_i + 1 if @cpus != last_cpu.to_i raise 'The total number of numa_nodes[:cpus] must equal config.cpus' end @numa_nodes end def cpu_feature(options = {}) if options[:name].nil? || options[:policy].nil? raise 'CPU Feature name AND policy must be specified' end @cpu_features = [] if @cpu_features == UNSET_VALUE @cpu_features.push(name: options[:name], policy: options[:policy]) end def hyperv_feature(options = {}) if options[:name].nil? || options[:state].nil? raise 'Feature name AND state must be specified' end @features_hyperv = [{name: options[:name], state: options[:state]}] if @features_hyperv == UNSET_VALUE end def cputopology(options = {}) if options[:sockets].nil? || options[:cores].nil? || options[:threads].nil? raise 'CPU topology must have all of sockets, cores and threads specified' end if @cpu_topology == UNSET_VALUE @cpu_topology = {} end @cpu_topology[:sockets] = options[:sockets] @cpu_topology[:cores] = options[:cores] @cpu_topology[:threads] = options[:threads] end def memorybacking(option, config = {}) case option when :source raise 'Source type must be specified' if config[:type].nil? when :access raise 'Access mode must be specified' if config[:mode].nil? when :allocation raise 'Allocation mode must be specified' if config[:mode].nil? end @memory_backing = [] if @memory_backing == UNSET_VALUE @memory_backing.push(name: option, config: config) end def input(options = {}) if options[:type].nil? || options[:bus].nil? raise 'Input type AND bus must be specified' end @inputs = [] if @inputs == UNSET_VALUE @inputs.push(type: options[:type], bus: options[:bus]) end def channel(options = {}) if options[:type].nil? raise 'Channel type must be specified.' elsif options[:type] == 'unix' && options[:target_type] == 'guestfwd' # Guest forwarding requires a target (ip address) and a port if options[:target_address].nil? || options[:target_port].nil? || options[:source_path].nil? raise 'guestfwd requires target_address, target_port and source_path' end end @channels = [] if @channels == UNSET_VALUE @channels.push(type: options[:type], source_mode: options[:source_mode], source_path: options[:source_path], target_address: options[:target_address], target_name: options[:target_name], target_port: options[:target_port], target_type: options[:target_type]) end def random(options = {}) if !options[:model].nil? && options[:model] != 'random' raise 'The only supported rng backend is "random".' end @rng = {} if @rng == UNSET_VALUE @rng[:model] = options[:model] end def pci(options = {}) if options[:bus].nil? || options[:slot].nil? || options[:function].nil? raise 'Bus AND slot AND function must be specified. 
Check `lspci` for that numbers.' end @pcis = [] if @pcis == UNSET_VALUE @pcis.push(bus: options[:bus], slot: options[:slot], function: options[:function]) end def watchdog(options = {}) if options[:model].nil? raise 'Model must be specified.' end if @watchdog_dev == UNSET_VALUE @watchdog_dev = {} end @watchdog_dev[:model] = options[:model] @watchdog_dev[:action] = options[:action] || 'reset' end def usb_controller(options = {}) if options[:model].nil? raise 'USB controller model must be specified.' end if @usbctl_dev == UNSET_VALUE @usbctl_dev = {} end @usbctl_dev[:model] = options[:model] @usbctl_dev[:ports] = options[:ports] end def usb(options = {}) if (options[:bus].nil? || options[:device].nil?) && options[:vendor].nil? && options[:product].nil? raise 'Bus and device and/or vendor and/or product must be specified. Check `lsusb` for these.' end @usbs = [] if @usbs == UNSET_VALUE @usbs.push(bus: options[:bus], device: options[:device], vendor: options[:vendor], product: options[:product], startupPolicy: options[:startupPolicy]) end def redirdev(options = {}) raise 'Type must be specified.' if options[:type].nil? @redirdevs = [] if @redirdevs == UNSET_VALUE @redirdevs.push(type: options[:type]) end def redirfilter(options = {}) raise 'Option allow must be specified.' if options[:allow].nil? @redirfilters = [] if @redirfilters == UNSET_VALUE @redirfilters.push(class: options[:class] || -1, vendor: options[:vendor] || -1, product: options[:product] || -1, version: options[:version] || -1, allow: options[:allow]) end def smartcard(options = {}) if options[:mode].nil? raise 'Option mode must be specified.' elsif options[:mode] != 'passthrough' raise 'Currently only passthrough mode is supported!' elsif options[:type] == 'tcp' && (options[:source_mode].nil? || options[:source_host].nil? || options[:source_service].nil?) raise 'If using type "tcp", option "source_mode", "source_host" and "source_service" must be specified.' end if @smartcard_dev == UNSET_VALUE @smartcard_dev = {} end @smartcard_dev[:mode] = options[:mode] @smartcard_dev[:type] = options[:type] || 'spicevmc' @smartcard_dev[:source_mode] = options[:source_mode] if @smartcard_dev[:type] == 'tcp' @smartcard_dev[:source_host] = options[:source_host] if @smartcard_dev[:type] == 'tcp' @smartcard_dev[:source_service] = options[:source_service] if @smartcard_dev[:type] == 'tcp' end # NOTE: this will run twice for each time it's needed- keep it idempotent def storage(storage_type, options = {}) if storage_type == :file if options[:device] == :cdrom _handle_cdrom_storage(options) else _handle_disk_storage(options) end end end def _handle_cdrom_storage(options = {}) # # # # #
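      # For reference, an attached cdrom ends up as a libvirt <disk> element
      # roughly like the following (illustrative sketch only; the actual XML is
      # produced by the domain templates and the path is a placeholder):
      #   <disk type="file" device="cdrom">
      #     <source file="/path/to/image.iso"/>
      #     <target dev="hda" bus="ide"/>
      #     <readonly/>
      #   </disk>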
# # # note the target dev will need to be changed with each cdrom drive (hdc, hdd, etc), # as will the address unit number (unit=0, unit=1, etc) options = { bus: 'ide', path: nil }.merge(options) cdrom = { dev: options[:dev], bus: options[:bus], path: options[:path] } @cdroms << cdrom end def _handle_disk_storage(options = {}) options = { type: 'qcow2', size: '10G', # matches the fog default path: nil, bus: 'virtio' }.merge(options) disk = { device: options[:device], type: options[:type], size: options[:size], path: options[:path], bus: options[:bus], cache: options[:cache] || 'default', allow_existing: options[:allow_existing], shareable: options[:shareable], serial: options[:serial] } @disks << disk # append end def qemuargs(options = {}) @qemu_args << options if options[:value] end # code to generate URI from a config moved out of the connect action def _generate_uri # builds the libvirt connection URI from the given driver config # Setup connection uri. uri = @driver.dup virt_path = case uri when 'qemu', 'kvm' @qemu_use_session ? '/session' : '/system' when 'openvz', 'uml', 'phyp', 'parallels' '/system' when '@en', 'esx' '/' when 'vbox', 'vmwarews', 'hyperv' '/session' else raise "Require specify driver #{uri}" end if uri == 'kvm' uri = 'qemu' # use qemu uri for kvm domain type end if @connect_via_ssh uri << '+ssh://' uri << @username + '@' if @username uri << if @host @host else 'localhost' end else uri << '://' uri << @host if @host end uri << virt_path uri << '?no_verify=1' if @id_ssh_key_file # set ssh key for access to libvirt host uri << "\&keyfile=" # if no slash, prepend $HOME/.ssh/ @id_ssh_key_file.prepend("#{`echo ${HOME}`.chomp}/.ssh/") if @id_ssh_key_file !~ /\A\// uri << @id_ssh_key_file end # set path to libvirt socket uri << "\&socket=" + @socket if @socket uri end def finalize! @driver = 'kvm' if @driver == UNSET_VALUE @host = nil if @host == UNSET_VALUE @connect_via_ssh = false if @connect_via_ssh == UNSET_VALUE @username = nil if @username == UNSET_VALUE @password = nil if @password == UNSET_VALUE @id_ssh_key_file = 'id_rsa' if @id_ssh_key_file == UNSET_VALUE @storage_pool_name = 'default' if @storage_pool_name == UNSET_VALUE @storage_pool_path = nil if @storage_pool_path == UNSET_VALUE @random_hostname = false if @random_hostname == UNSET_VALUE @management_network_device = 'virbr0' if @management_network_device == UNSET_VALUE @management_network_name = 'vagrant-libvirt' if @management_network_name == UNSET_VALUE @management_network_address = '192.168.121.0/24' if @management_network_address == UNSET_VALUE @management_network_mode = 'nat' if @management_network_mode == UNSET_VALUE @management_network_mac = nil if @management_network_mac == UNSET_VALUE @management_network_guest_ipv6 = 'yes' if @management_network_guest_ipv6 == UNSET_VALUE @management_network_autostart = false if @management_network_autostart == UNSET_VALUE @management_network_pci_bus = nil if @management_network_pci_bus == UNSET_VALUE @management_network_pci_slot = nil if @management_network_pci_slot == UNSET_VALUE @system_uri = 'qemu:///system' if @system_uri == UNSET_VALUE @qemu_use_session = false if @qemu_use_session == UNSET_VALUE # generate a URI if none is supplied @uri = _generate_uri if @uri == UNSET_VALUE # Domain specific settings. 
@uuid = '' if @uuid == UNSET_VALUE @memory = 512 if @memory == UNSET_VALUE @memory_backing = [] if @memory_backing == UNSET_VALUE @cpus = 1 if @cpus == UNSET_VALUE @cpu_mode = 'host-model' if @cpu_mode == UNSET_VALUE @cpu_model = if (@cpu_model == UNSET_VALUE) && (@cpu_mode == 'custom') 'qemu64' elsif @cpu_mode != 'custom' '' else @cpu_model end @cpu_topology = {} if @cpu_topology == UNSET_VALUE @cpu_fallback = 'allow' if @cpu_fallback == UNSET_VALUE @cpu_features = [] if @cpu_features == UNSET_VALUE @features = ['acpi','apic','pae'] if @features == UNSET_VALUE @features_hyperv = [] if @features_hyperv == UNSET_VALUE @numa_nodes = @numa_nodes == UNSET_VALUE ? nil : _generate_numa @loader = nil if @loader == UNSET_VALUE @nvram = nil if @nvram == UNSET_VALUE @machine_type = nil if @machine_type == UNSET_VALUE @machine_arch = nil if @machine_arch == UNSET_VALUE @machine_virtual_size = nil if @machine_virtual_size == UNSET_VALUE @disk_bus = 'virtio' if @disk_bus == UNSET_VALUE @disk_device = 'vda' if @disk_device == UNSET_VALUE @nic_model_type = nil if @nic_model_type == UNSET_VALUE @nested = false if @nested == UNSET_VALUE @volume_cache = 'default' if @volume_cache == UNSET_VALUE @kernel = nil if @kernel == UNSET_VALUE @cmd_line = '' if @cmd_line == UNSET_VALUE @initrd = '' if @initrd == UNSET_VALUE @dtb = nil if @dtb == UNSET_VALUE @graphics_type = 'vnc' if @graphics_type == UNSET_VALUE @graphics_autoport = 'yes' if @graphics_port == UNSET_VALUE @graphics_autoport = 'no' if @graphics_port != UNSET_VALUE if (@graphics_type != 'vnc' && @graphics_type != 'spice') || @graphics_passwd == UNSET_VALUE @graphics_passwd = nil end @graphics_port = -1 if @graphics_port == UNSET_VALUE @graphics_ip = '127.0.0.1' if @graphics_ip == UNSET_VALUE @video_type = 'cirrus' if @video_type == UNSET_VALUE @video_vram = 9216 if @video_vram == UNSET_VALUE @sound_type = nil if @sound_type == UNSET_VALUE @keymap = 'en-us' if @keymap == UNSET_VALUE @kvm_hidden = false if @kvm_hidden == UNSET_VALUE @tpm_model = 'tpm-tis' if @tpm_model == UNSET_VALUE @tpm_type = 'passthrough' if @tpm_type == UNSET_VALUE @tpm_path = nil if @tpm_path == UNSET_VALUE @nic_adapter_count = 8 if @nic_adapter_count == UNSET_VALUE @emulator_path = nil if @emulator_path == UNSET_VALUE # Boot order @boot_order = [] if @boot_order == UNSET_VALUE # Storage @disks = [] if @disks == UNSET_VALUE @disks.map! do |disk| disk[:device] = _get_device(@disks) if disk[:device].nil? disk end @cdroms = [] if @cdroms == UNSET_VALUE @cdroms.map! do |cdrom| cdrom[:dev] = _get_cdrom_dev(@cdroms) if cdrom[:dev].nil? 
cdrom end # Inputs @inputs = [{ type: 'mouse', bus: 'ps2' }] if @inputs == UNSET_VALUE # Channels @channels = [] if @channels == UNSET_VALUE # PCI device passthrough @pcis = [] if @pcis == UNSET_VALUE # Random number generator passthrough @rng = {} if @rng == UNSET_VALUE # Watchdog device @watchdog_dev = {} if @watchdog_dev == UNSET_VALUE # USB controller @usbctl_dev = {} if @usbctl_dev == UNSET_VALUE # USB device passthrough @usbs = [] if @usbs == UNSET_VALUE # Redirected devices @redirdevs = [] if @redirdevs == UNSET_VALUE @redirfilters = [] if @redirfilters == UNSET_VALUE # smartcard device @smartcard_dev = {} if @smartcard_dev == UNSET_VALUE # Suspend mode @suspend_mode = 'pause' if @suspend_mode == UNSET_VALUE # Autostart @autostart = false if @autostart == UNSET_VALUE # Attach mgmt network @mgmt_attach = true if @mgmt_attach == UNSET_VALUE @qemu_args = [] if @qemu_args == UNSET_VALUE end def validate(machine) errors = _detected_errors machine.provider_config.disks.each do |disk| if disk[:path] && (disk[:path][0] == '/') errors << "absolute volume paths like '#{disk[:path]}' not yet supported" end end machine.config.vm.networks.each do |_type, opts| if opts[:mac] opts[:mac].downcase! if opts[:mac] =~ /\A([0-9a-f]{12})\z/ opts[:mac] = opts[:mac].scan(/../).join(':') end unless opts[:mac] =~ /\A([0-9a-f]{2}:){5}([0-9a-f]{2})\z/ errors << "Configured NIC MAC '#{opts[:mac]}' is not in 'xx:xx:xx:xx:xx:xx' or 'xxxxxxxxxxxx' format" end end end { 'Libvirt Provider' => errors } end def merge(other) super.tap do |result| c = disks.dup c += other.disks result.disks = c c = cdroms.dup c += other.cdroms result.cdroms = c end end end end end vagrant-libvirt-0.0.45/tools/0000755000004100000410000000000013363570025016107 5ustar www-datawww-datavagrant-libvirt-0.0.45/tools/prepare_redhat_for_box.sh0000755000004100000410000000771413363570025023162 0ustar www-datawww-data#!/bin/bash +x # This script should help to prepare RedHat and RedHat like OS (CentOS, # Scientific Linux, ...) for Vagrant box usage. # To create new box image, just install minimal base system in VM on top of not # fully allocated qcow2 image. Then upload this script to the VM and run it. # After script has finished, nothing else than halting machine should be done. # For more info about creating custom box refer to # https://github.com/vagrant-libvirt/vagrant-libvirt/tree/master/example_box # We need to set a hostname. if [ $# -ne 1 ]; then echo "Usage: $0 " echo "Hostname should be in format vagrant-[os-name], e.g. vagrant-redhat63." exit 1 fi # On which version of RedHet are we running? RHEL_MAJOR_VERSION=$(sed 's/.*release \([0-9]\)\..*/\1/' /etc/redhat-release) if [ $? -ne 0 ]; then echo "Is this a RedHat distro?" exit 1 fi echo "* Found RedHat ${RHEL_MAJOR_VERSION} version." # Setup hostname vagrant-something. FQDN="$1.vagrantup.com" if grep '^HOSTNAME=' /etc/sysconfig/network > /dev/null; then sed -i 's/HOSTNAME=\(.*\)/HOSTNAME='${FQDN}'/' /etc/sysconfig/network else echo "HOSTNAME=${FQDN}" >> /etc/sysconfig/network fi # Enable EPEL repository. yum -y install wget cd ~root if [ $RHEL_MAJOR_VERSION -eq 5 ]; then wget http://ftp.astral.ro/mirrors/fedora/pub/epel/5/i386/epel-release-5-4.noarch.rpm EPEL_PKG="epel-release-5-4.noarch.rpm" else wget http://ftp.astral.ro/mirrors/fedora/pub/epel/6/i386/epel-release-6-8.noarch.rpm EPEL_PKG="epel-release-6-8.noarch.rpm" fi rpm -i ~root/${EPEL_PKG} rm -f ~root/${EPEL_PKG} # Install some required software. 
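# openssh-server and sudo let Vagrant reach and control the guest; ruby,
# ruby-devel, make, gcc and rubygems are typically needed so provisioner gems
# can be built inside the box; rsync backs the default synced folder type.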
yum -y install openssh-server openssh-clients sudo \ ruby ruby-devel make gcc rubygems rsync chkconfig sshd on # Users, groups, passwords and sudoers. echo 'vagrant' | passwd --stdin root grep 'vagrant' /etc/passwd > /dev/null if [ $? -ne 0 ]; then echo '* Creating user vagrant.' useradd vagrant echo 'vagrant' | passwd --stdin vagrant fi grep '^admin:' /etc/group > /dev/null || groupadd admin usermod -G admin vagrant echo 'Defaults env_keep += "SSH_AUTH_SOCK"' >> /etc/sudoers echo '%admin ALL=NOPASSWD: ALL' >> /etc/sudoers sed -i 's/Defaults\s*requiretty/Defaults !requiretty/' /etc/sudoers # SSH setup # Add Vagrant ssh key for root and vagrant accouts. sed -i 's/.*UseDNS.*/UseDNS no/' /etc/ssh/sshd_config [ -d ~root/.ssh ] || mkdir ~root/.ssh chmod 700 ~root/.ssh cat > ~root/.ssh/authorized_keys << EOF ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key EOF chmod 600 ~root/.ssh/authorized_keys [ -d ~vagrant/.ssh ] || mkdir ~vagrant/.ssh chmod 700 ~vagrant/.ssh cat > ~vagrant/.ssh/authorized_keys << EOF ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key EOF chmod 600 ~vagrant/.ssh/authorized_keys # Disable firewall and switch SELinux to permissive mode. chkconfig iptables off chkconfig ip6tables off # Networking setup.. # Don't fix ethX names to hw address. rm -f /etc/udev/rules.d/*persistent-net.rules rm -f /etc/udev/rules.d/*-net.rules rm -fr /var/lib/dhclient/* # Interface eth0 should get IP address via dhcp. cat > /etc/sysconfig/network-scripts/ifcfg-eth0 << EOF DEVICE="eth0" BOOTPROTO="dhcp" ONBOOT="yes" NM_CONTROLLED="no" EOF # Do some cleanup.. rm -f ~root/.bash_history rm -r "$(gem env gemdir)"/doc/* yum clean all halt vagrant-libvirt-0.0.45/tools/create_box.sh0000755000004100000410000000552313363570025020566 0ustar www-datawww-data#!/usr/bin/env bash #set -xu error() { local msg="${1}" echo "==> ERROR: ${msg}" exit 1 } usage() { echo "Usage: ${0} IMAGE [BOX] [Vagrantfile.add]" echo echo "Package a qcow2 image into a vagrant-libvirt reusable box" } # Print the image's backing file backing(){ local img=${1} qemu-img info "$img" | grep 'backing file:' | cut -d ':' -f2 } # Rebase the image rebase(){ local img=${1} qemu-img rebase -p -b "" "$img" [[ "$?" -ne 0 ]] && error "Error during rebase" } # Is absolute path isabspath(){ local path=${1} [[ "$path" =~ ^/.* ]] } if [ -z "$1" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then usage exit 1 fi IMG=$(readlink -e "$1") [[ "$?" -ne 0 ]] && error "'$1': No such image" IMG_DIR=$(dirname "$IMG") IMG_BASENAME=$(basename "$IMG") BOX=${2:-} # If no box name is supplied infer one from image name if [[ -z "$BOX" ]]; then BOX_NAME=${IMG_BASENAME%.*} BOX=$BOX_NAME.box else BOX_NAME=$(basename "${BOX%.*}") fi [[ -f "$BOX" ]] && error "'$BOX': Already exists" CWD=$(pwd) TMP_DIR="$CWD/_tmp_package" TMP_IMG="$TMP_DIR/box.img" mkdir -p "$TMP_DIR" [[ ! 
-r "$IMG" ]] && error "'$IMG': Permission denied" if [ -n "$3" ] && [ -r "$3" ]; then VAGRANTFILE_ADD="$(cat $3)" fi # We move / copy (when the image has master) the image to the tempdir # ensure that it's moved back / removed again if [[ -n $(backing "$IMG") ]]; then echo "==> Image has backing image, copying image and rebasing ..." trap "rm -rf $TMP_DIR" EXIT cp "$IMG" "$TMP_IMG" rebase "$TMP_IMG" else if fuser -s "$IMG"; then error "Image '$IMG_BASENAME' is used by another process" fi # move the image to get a speed-up and use less space on disk trap 'mv "$TMP_IMG" "$IMG"; rm -rf "$TMP_DIR"' EXIT mv "$IMG" "$TMP_IMG" fi cd "$TMP_DIR" #Using the awk int function here to truncate the virtual image size to an #integer since the fog-libvirt library does not seem to properly handle #floating point. IMG_SIZE=$(qemu-img info --output=json "$TMP_IMG" | awk '/virtual-size/{s=int($2)/(1024^3); print (s == int(s)) ? s : int(s)+1 }') echo "{$IMG_SIZE}" cat > metadata.json < Vagrantfile < Creating box, tarring and gzipping" tar cvzf "$BOX" -S --totals ./metadata.json ./Vagrantfile ./box.img # if box is in tmpdir move it to CWD before removing tmpdir if ! isabspath "$BOX"; then mv "$BOX" "$CWD" fi echo "==> ${BOX} created" echo "==> You can now add the box:" echo "==> 'vagrant box add ${BOX} --name ${BOX_NAME}'" vagrant-libvirt-0.0.45/Gemfile0000644000004100000410000000123313363570025016241 0ustar www-datawww-datasource 'https://rubygems.org' # Specify your gem's dependencies in vagrant-libvirt.gemspec gemspec group :development do # We depend on Vagrant for development, but we don't add it as a # gem dependency because we expect to be installed within the # Vagrant environment itself using `vagrant plugin`. if ENV['VAGRANT_VERSION'] gem 'vagrant', :git => 'https://github.com/hashicorp/vagrant.git', tag: ENV['VAGRANT_VERSION'] else gem 'vagrant', :git => 'https://github.com/hashicorp/vagrant.git' end gem 'vagrant-spec', :github => 'hashicorp/vagrant-spec' gem 'pry' end group :plugins do gemspec end gem 'coveralls', require: false vagrant-libvirt-0.0.45/locales/0000755000004100000410000000000013363570025016371 5ustar www-datawww-datavagrant-libvirt-0.0.45/locales/en.yml0000644000004100000410000001535613363570025017530 0ustar www-datawww-dataen: vagrant_libvirt: already_created: |- The domain is already created. not_created: |- Domain is not created. Please run `vagrant up` first. not_running: |- Domain is not running. Please run `vagrant up` or `vagrant resume` first. not_suspended: |- Domain is not suspended. finding_volume: |- Checking if volume is available. creating_domain: |- Creating domain with the following settings... manual_resize_required: |- Created volume larger than box defaults, will require manual resizing of filesystems to utilize. uploading_volume: |- Uploading base box image as volume into libvirt storage... creating_domain_volume: |- Creating image (snapshot of base box volume). removing_domain_volume: |- Removing image (snapshot of base box volume). starting_domain: |- Starting domain. terminating: |- Removing domain... poweroff_domain: |- Poweroff domain. destroy_domain: |- Removing domain... halt_domain: |- Halting domain... resuming_domain: |- Resuming domain... suspending_domain: |- Suspending domain... package_domain: |- Packaging domain... waiting_for_ready: |- Waiting for domain to become "ready"... waiting_for_ip: |- Waiting for domain to get an IP address... waiting_for_ssh: |- Waiting for SSH to become available... booted: |- Machine is booted. 
rsync_folder: |- Rsyncing folder: %{hostpath} => %{guestpath} ready: |- Machine is booted and ready for use! remove_stale_volume: |- Remove stale volume... warnings: ignoring_virtual_size_too_small: |- Ignoring requested virtual disk size of '%{requested}' as it is below the minimum box image size of '%{minimum}'. forwarding_udp: |- Forwarding UDP ports is not supported. Ignoring. errors: package_not_supported: No support for package with libvirt. Create box manually. fog_error: |- There was an error talking to Libvirt. The error message is shown below: %{message} no_matching_volume: |- No matching volume was found! Please check your volume setting to make sure you have a valid volume chosen. no_storage_pool: |- No usable storage pool found! Please check if storage pool is created and available. no_box_volume: |- Volume for box image is missing in storage pools. Try to run vagrant again, or check if storage volume is accessible. domain_volume_exists: |- Volume for domain is already created. Please run 'vagrant destroy' first. no_domain_volume: |- Volume for domain is missing. Try to run 'vagrant up' again. interface_slot_not_available: |- Interface adapter number is already in use. Please specify other adapter number. interface_slot_exhausted: |- Available interface adapters have been exhausted. Please increase the nic_adapter_count. rsync_error: |- There was an error when attempting to rsync a share folder. Please inspect the error message below for more info. Host path: %{hostpath} Guest path: %{guestpath} Error: %{stderr} no_box_virtual_size: |- No image virtual size specified for box. no_box_format: |- No image format specified for box. wrong_box_format: |- Wrong image format specified for box. fog_libvirt_connection_error: |- Error while connecting to libvirt: %{error_message} fog_create_volume_error: |- Error while creating a storage pool volume: %{error_message} fog_create_domain_volume_error: |- Error while creating volume for domain: %{error_message} fog_create_server_error: |- Error while creating domain: %{error_message} domain_name_exists: |- Name `%{domain_name}` of domain about to create is already taken. Please try to run `vagrant up` command again. creating_storage_pool_error: |- There was error while creating libvirt storage pool: %{error_message} creating_volume_error: |- There was error while creating libvirt volume: %{error_message} image_upload_error: |- Error while uploading image to storage pool: %{error_message} no_domain_error: |- No domain found. %{error_message} attach_device_error: |- Error while attaching new device to domain. %{error_message} detach_device_error: |- Error while detaching device from domain. %{error_message} no_ip_address_error: |- No IP address found. management_network_error: |- Error in specification of management network: %{error_message}. network_name_and_address_mismatch: |- Address %{ip_address} does not match with network name %{network_name}. Please fix your configuration and run vagrant again. dhcp_mismatch: |- Network %{network_name} exists but does not have dhcp %{requested}. Please fix your configuration and run vagrant again. create_network_error: |- Error occurred while creating new network: %{error_message}. network_not_available_error: |- Network %{network_name} is not available. Specify available network name, or an ip address if you want to create a new network. activate_network_error: |- Error while activating network: %{error_message}. 
autostart_network_error: |- Error while setting up autostart on network: %{error_message}. destroy_network_error: |- Error while removing network %{network_name}. %{error_message}. delete_snapshot_error: |- Error while deleting snapshot: %{error_message}. tunnel_port_not_defined: |- Tunnel UDP or TCP port not defined. management_network_required: |- Management network can't be disabled when the VM uses a box. Please fix your configuration and run vagrant again. states: paused: |- The Libvirt domain is suspended. Run `vagrant resume` to resume it. shutting_down: |- The Libvirt domain is shutting down. Wait for it to complete and then run `vagrant up` to start it or `vagrant destroy` to remove it. shutoff: |- The Libvirt domain is not running. Run `vagrant up` to start it. not_created: |- The Libvirt domain is not created. Run `vagrant up` to create it. running: |- The Libvirt domain is running. To stop this machine, you can run `vagrant halt`. To destroy the machine, you can run `vagrant destroy`. preparing: |- The vagrant machine is being prepared for creation, please wait for it to reach a steady state before issuing commands on it. vagrant-libvirt-0.0.45/.coveralls.yml0000644000004100000410000000003013363570025017533 0ustar www-datawww-dataservice_name: travis-ci vagrant-libvirt-0.0.45/.github/0000755000004100000410000000000013363570025016307 5ustar www-datawww-datavagrant-libvirt-0.0.45/.github/issue_template.md0000644000004100000410000000227313363570025021660 0ustar www-datawww-data ### Steps to reproduce 1. 2. 3. ### Expected behaviour Tell us what should happen ### Actual behaviour Tell us what happens instead ### System configuration **OS/Distro version:** **Libvirt version:** **Output of `vagrant version; vagrant plugin list`:** **Output of `VAGRANT_LOG=debug vagrant ... --provider=libvirt`** ``` Insert debug output inside quotes here (replace ... with whatever command you use to trigger the issue) ``` **A Vagrantfile to reproduce the issue:** ``` Insert Vagrantfile inside quotes here (remove sensitive data if needed) ``` **Are you using the upstream vagrant package or your distro's package?** Upstream / Distro vagrant-libvirt-0.0.45/example_box/0000755000004100000410000000000013363570025017252 5ustar www-datawww-datavagrant-libvirt-0.0.45/example_box/README.md0000644000004100000410000000204413363570025020531 0ustar www-datawww-data# Vagrant Libvirt Example Box Vagrant providers each require a custom provider-specific box format. This folder shows the example contents of a box for the `libvirt` provider. To turn this into a box, create a vagrant image according to the documentation (don't forget to install the rsync command) and create the box with the following command: ``` $ tar cvzf custom_box.box ./metadata.json ./Vagrantfile ./box.img ``` This box works by using Vagrant's built-in Vagrantfile merging to set up defaults for Libvirt. These defaults can easily be overwritten by higher-level Vagrantfiles (such as project root Vagrantfiles). ## Box Metadata A Libvirt box should define at least three data fields in the `metadata.json` file. * provider - The provider name is libvirt. * format - The currently supported format is qcow2. * virtual_size - Virtual size of the image in GBytes. ## Converting Boxes Instead of creating a box from scratch, you can use [vagrant-mutate](https://github.com/sciurus/vagrant-mutate) to take boxes created for other Vagrant providers and use them with vagrant-libvirt.
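As a rough end-to-end sketch (the image and box names below are purely illustrative), the `tools/create_box.sh` helper shown earlier automates exactly this packaging step and prints the matching `vagrant box add` command when it finishes:

```
# Package a prepared qcow2 image (e.g. one finished with tools/prepare_redhat_for_box.sh)
./tools/create_box.sh centos64.qcow2 centos64.box

# Register the resulting box with Vagrant under a name of your choice
vagrant box add centos64.box --name centos64
```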
vagrant-libvirt-0.0.45/example_box/metadata.json0000644000004100000410000000012513363570025021723 0ustar www-datawww-data{ "provider" : "libvirt", "format" : "qcow2", "virtual_size" : 16 } vagrant-libvirt-0.0.45/example_box/Vagrantfile0000644000004100000410000000335413363570025021444 0ustar www-datawww-data# -*- mode: ruby -*- # vi: set ft=ruby : Vagrant.configure("2") do |config| # Example configuration of a new VM. # #config.vm.define :test_vm do |test_vm| # Box name # #test_vm.vm.box = "centos64" # Domain Specific Options # # See README for more info. # #test_vm.vm.provider :libvirt do |domain| # domain.memory = 2048 # domain.cpus = 2 #end # Interfaces for VM # # Networking features in the form of `config.vm.network` # #test_vm.vm.network :private_network, :ip => '10.20.30.40' #test_vm.vm.network :public_network, :ip => '10.20.30.41' #end # Options for the libvirt vagrant provider. config.vm.provider :libvirt do |libvirt| # A hypervisor name to access. Different drivers can be specified, but # this version of the provider creates KVM machines only. Some examples of # drivers are kvm (qemu hardware accelerated), qemu (qemu emulated), # xen (Xen hypervisor), lxc (Linux Containers), # esx (VMware ESX), vmwarews (VMware Workstation) and more. Refer to # the documentation for available drivers (http://libvirt.org/drivers.html). libvirt.driver = "kvm" # The name of the server where libvirtd is running. # libvirt.host = "localhost" # Whether to use an ssh tunnel to connect to Libvirt. libvirt.connect_via_ssh = false # The username and password to access Libvirt. The password is not used when # connecting via ssh. libvirt.username = "root" #libvirt.password = "secret" # Libvirt storage pool name, where the box image and instance snapshots will # be stored. libvirt.storage_pool_name = "default" # Set a prefix for the machines that's different from the project dir name. #libvirt.default_prefix = '' end end
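For comparison, a minimal project-level Vagrantfile that consumes a packaged box and overrides the provider defaults seen earlier (512 MB of memory, 1 vCPU) could look like the following sketch; the box name `custom_box` is hypothetical and assumes the box was added with `vagrant box add custom_box.box --name custom_box`:

```
# Minimal sketch of a project Vagrantfile; the box name "custom_box" is
# hypothetical and must match the name used with `vagrant box add`.
Vagrant.configure("2") do |config|
  config.vm.box = "custom_box"

  config.vm.provider :libvirt do |libvirt|
    # Override the plugin defaults (512 MB RAM, 1 vCPU).
    libvirt.memory = 2048
    libvirt.cpus = 2
  end
end
```

Because of the Vagrantfile merging described in the example box README, these values take precedence over the defaults shipped inside the box.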