pax_global_header00006660000000000000000000000064141423252650014516gustar00rootroot0000000000000052 comment=bb267e391049e22d872e5f700b5cd0aa2a4cdac7 vagrant-libvirt-0.7.0/000077500000000000000000000000001414232526500146355ustar00rootroot00000000000000vagrant-libvirt-0.7.0/.coveralls.yml000066400000000000000000000000351414232526500174260ustar00rootroot00000000000000service_name: github-actions vagrant-libvirt-0.7.0/.dockerignore000066400000000000000000000000211414232526500173020ustar00rootroot00000000000000Gemfile.Lock pkg vagrant-libvirt-0.7.0/.gitattributes000066400000000000000000000000541414232526500175270ustar00rootroot00000000000000lib/vagrant-libvirt/version.rb export-subst vagrant-libvirt-0.7.0/.github/000077500000000000000000000000001414232526500161755ustar00rootroot00000000000000vagrant-libvirt-0.7.0/.github/ISSUE_TEMPLATE/000077500000000000000000000000001414232526500203605ustar00rootroot00000000000000vagrant-libvirt-0.7.0/.github/ISSUE_TEMPLATE/bug_report.md000066400000000000000000000020251414232526500230510ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: '' labels: '' assignees: '' --- **Describe the bug** A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior: 1. Go to '...' 2. Click on '....' 3. Scroll down to '....' 4. See error **Expected behavior** A clear and concise description of what you expected to happen. **Screenshots** If applicable, add screenshots to help explain your problem. **Versions (please complete the following information):**: - Libvirt version: - Vagrant version [output of `vagrant version`]: - Vagrant flavour [Upstream or Distro]: - Vagrant plugins versions (including vagrant-libvirt) [output of `vagrant plugin list`]: **Debug Log** Attach Output of `VAGRANT_LOG=debug vagrant ... --provider=libvirt >vagrant.log 2>&1` ``` Delete this text and drag and drop the log file for github to attach and add a link here ``` **A Vagrantfile to reproduce the issue:** ``` Insert Vagrantfile inside quotes here (remove sensitive data if needed) ``` vagrant-libvirt-0.7.0/.github/ISSUE_TEMPLATE/feature_request.md000066400000000000000000000011231414232526500241020ustar00rootroot00000000000000--- name: Feature request about: Suggest an idea for this project title: '' labels: '' assignees: '' --- **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] **Describe the solution you'd like** A clear and concise description of what you want to happen. **Describe alternatives you've considered** A clear and concise description of any alternative solutions or features you've considered. **Additional context** Add any other context or screenshots about the feature request here. vagrant-libvirt-0.7.0/.github/no-response.yml000066400000000000000000000012621414232526500211710ustar00rootroot00000000000000# Configuration for probot-no-response - https://github.com/probot/no-response # Number of days of inactivity before an Issue is closed for lack of response daysUntilClose: 30 # Label requiring a response responseRequiredLabel: needinfo # Comment to post when closing an Issue for lack of response. Set to `false` to disable closeComment: > This issue has been automatically closed because there has been no response to our request for more information from the original author. With only the information that is currently in the issue, we don't have enough information to take action. 
Please reach out if you have or find the answers we need so that we can investigate further. vagrant-libvirt-0.7.0/.github/workflows/000077500000000000000000000000001414232526500202325ustar00rootroot00000000000000vagrant-libvirt-0.7.0/.github/workflows/docker-image.yml000066400000000000000000000120001414232526500232750ustar00rootroot00000000000000name: docker-image on: push: branches: - master tags: - '*.*.*' pull_request: jobs: build-docker-image: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v2 with: fetch-depth: 0 - name: Prepare id: prep run: | DOCKER_IMAGE=${DOCKERHUB_ORGANIZATION:-vagrantlibvirt}/vagrant-libvirt VERSION=noop if [ "${{ github.event_name }}" = "schedule" ]; then VERSION=nightly elif [[ $GITHUB_REF == refs/tags/* ]]; then VERSION=${GITHUB_REF#refs/tags/} elif [[ $GITHUB_REF == refs/heads/* ]]; then VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then VERSION=edge fi elif [[ $GITHUB_REF == refs/pull/* ]]; then VERSION=pr-${{ github.event.number }} fi TAGS="${DOCKER_IMAGE}:${VERSION}" if [[ $VERSION =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then MINOR=${VERSION%.*} MAJOR=v${MINOR%.*} TAGS="$TAGS,${DOCKER_IMAGE}:${MINOR},${DOCKER_IMAGE}:${MAJOR},${DOCKER_IMAGE}:latest" elif [ "${{ github.event_name }}" = "push" ]; then TAGS="$TAGS,${DOCKER_IMAGE}:sha-${GITHUB_SHA::8}" fi echo ::set-output name=version::${VERSION} echo ::set-output name=tags::${TAGS} echo ::set-output name=slim_tags::$(echo $TAGS | sed "s/,/-slim,/g")-slim echo ::set-output name=created::$(date -u +'%Y-%m-%dT%H:%M:%SZ') env: DOCKERHUB_ORGANIZATION: ${{ secrets.DOCKERHUB_ORGANIZATION }} - name: Set up QEMU uses: docker/setup-qemu-action@v1 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v1 with: driver-opts: image=moby/buildkit:master - name: Cache Docker layers uses: actions/cache@v2 with: path: /tmp/.buildx-cache key: ${{ runner.os }}-buildx-${{ github.sha }} restore-keys: | ${{ runner.os }}-buildx- - name: Check have credentials id: have_credentials run: | echo ::set-output name=access::${{ secrets.DOCKERHUB_USERNAME != '' && github.event_name != 'pull_request' }} - name: Login to DockerHub if: steps.have_credentials.outputs.access == 'true' uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build and push main image id: docker_build uses: docker/build-push-action@v2 with: context: . platforms: linux/amd64 push: ${{ steps.have_credentials.outputs.access }} tags: ${{ steps.prep.outputs.tags }} target: final labels: | org.opencontainers.image.title=${{ github.event.repository.name }} org.opencontainers.image.description=${{ github.event.repository.description }} org.opencontainers.image.url=${{ github.event.repository.html_url }} org.opencontainers.image.source=${{ github.event.repository.clone_url }} org.opencontainers.image.version=${{ steps.prep.outputs.version }} org.opencontainers.image.created=${{ steps.prep.outputs.created }} org.opencontainers.image.revision=${{ github.sha }} org.opencontainers.image.licenses=${{ github.event.repository.license.spdx_id }} cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache,mode=max - name: Build and push slim image id: docker_build_slim uses: docker/build-push-action@v2 with: context: . 
platforms: linux/amd64 push: ${{ steps.have_credentials.outputs.access }} tags: ${{ steps.prep.outputs.slim_tags }} target: slim labels: | org.opencontainers.image.title=${{ github.event.repository.name }} org.opencontainers.image.description=${{ github.event.repository.description }} org.opencontainers.image.url=${{ github.event.repository.html_url }} org.opencontainers.image.source=${{ github.event.repository.clone_url }} org.opencontainers.image.version=${{ steps.prep.outputs.version }} org.opencontainers.image.created=${{ steps.prep.outputs.created }} org.opencontainers.image.revision=${{ github.sha }} org.opencontainers.image.licenses=${{ github.event.repository.license.spdx_id }} cache-from: type=local,src=/tmp/.buildx-cache cache-to: type=local,dest=/tmp/.buildx-cache,mode=max - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} - name: Image digest Slim run: echo ${{ steps.docker_build_slim.outputs.digest }} vagrant-libvirt-0.7.0/.github/workflows/integration-tests.yml000066400000000000000000000056341414232526500244500ustar00rootroot00000000000000name: Integration Tests on: push: branches: - master pull_request: jobs: generate-matrix: runs-on: ubuntu-latest outputs: matrix: ${{ steps.generate-matrix.outputs.matrix }} steps: - uses: actions/checkout@v2 - name: Generate matrix id: generate-matrix run: | tests="$(awk -f tests/parse_tests.awk < tests/runtests.bats)" echo "::set-output name=matrix::${tests}" run-tests: needs: generate-matrix runs-on: ubuntu-latest strategy: fail-fast: false matrix: test_name: ${{ fromJSON(needs.generate-matrix.outputs.matrix) }} env: NOKOGIRI_USE_SYSTEM_LIBRARIES: true VAGRANT_VERSION: v2.2.14 steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - name: Set up libvirt run: | sudo apt-get update sudo apt-get install -y \ bridge-utils \ dnsmasq-base \ ebtables \ libarchive-tools \ libguestfs-tools \ libvirt-clients \ libvirt-daemon \ libvirt-daemon-system \ qemu-kvm \ qemu-utils \ ; sudo apt-get install \ libvirt-dev \ libz-dev \ ; sudo apt-get install \ bats \ fping \ ; # start daemon sudo systemctl start libvirtd # add user to group sudo usermod -a -G libvirt $USER - uses: actions/cache@v2 with: path: vendor/bundle key: ${{ runner.os }}-gems-${{ hashFiles('**/Gemfile.lock') }} restore-keys: | ${{ runner.os }}-gems- - uses: actions/cache@v2 with: path: ~/.vagrant.d/boxes key: ${{ runner.os }}-${{ env.VAGRANT_VERSION }} restore-keys: | ${{ runner.os }}- - name: Set up Ruby uses: ruby/setup-ruby@v1 with: ruby-version: 2.6.6 - name: Set up rubygems run: | gem update --system --conservative || (gem i "rubygems-update:~>2.7" --no-document && update_rubygems) gem update bundler --conservative - name: Run bundler using cached path run: | bundle config path vendor/bundle bundle install --jobs 4 --retry 3 - name: Install binstubs run: | bundle binstubs --all --path=./bin 1>/dev/null ./bin/vagrant --version - name: Run tests run: | mkdir -p $HOME/.vagrant.d/ # use export with full path to avoid needing to resolve ~ export VAGRANT_HOME=$HOME/.vagrant.d # use software emulation due to lack of nested emulation cat < $HOME/.vagrant.d/Vagrantfile Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.driver = "qemu" end end EOF # run under libvirt group sg libvirt -c "bats -f '${{ matrix.test_name }}' ./tests/runtests.bats" vagrant-libvirt-0.7.0/.github/workflows/release.yml000066400000000000000000000062101414232526500223740ustar00rootroot00000000000000name: release on: push: tags: - '*.*.*' jobs: release: name: Create 
Release runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v2 with: fetch-depth: 0 - name: Determine remote default branch uses: actions/github-script@v3.0.0 id: remote_default_branch with: github-token: ${{secrets.GITHUB_TOKEN}} script: | repo = await github.repos.get({ owner: context.repo.owner, repo: context.repo.repo, }) return repo.data.default_branch - name: Prepare id: prep run: | set -eu -o pipefail # exclude unreleased in case re-running workflow echo "unreleased=false" >> .github_changelog_generator echo "max-issues=200" >> .github_changelog_generator # in case we've tagged subsequently ensure consistent run NEXT_TAG_SHA1="$(git rev-list --tags --skip=0 --no-walk | grep -B 1 ${{ github.sha }} || true)" if [[ "${NEXT_TAG_SHA1:-}" != "${{ github.sha }}" ]] then NEXT_TAG=$(git describe --tags --abbrev=0 ${NEXT_TAG_SHA1}) echo "due-tag=${NEXT_TAG}" >> .github_changelog_generator fi # limit list to next tag if any LAST_TAG=$(git describe --tags --abbrev=0 HEAD~1 2>/dev/null || true) # to ensure that releases continue to get 'x commits since release' # need to pass the branch name in addition to the tag to the create # release. Work out which branch the current tag is on DEFAULT_BRANCH=origin/${{ steps.remote_default_branch.outputs.result }} if git merge-base --is-ancestor ${{ github.ref }} ${DEFAULT_BRANCH} then # if the tag is in the history of the default remote branch BRANCH=${DEFAULT_BRANCH} else # otherwise take the first match and hope for the best BRANCH=$(git branch -r --format="%(refname:short)" --contains ${{ github.ref }} | head -n1) fi BRANCH=${BRANCH#origin/} # set outputs for usage echo ::set-output name=previous_version::${LAST_TAG} echo ::set-output name=release_branch::${BRANCH} - name: Cache Github API requests id: cache-changelog-api-requests uses: actions/cache@v2 with: key: github-changelog-cache path: .github_changelog_generator_cache - name: Changelog Generation uses: CharMixer/auto-changelog-action@v1.1 with: token: ${{ secrets.GITHUB_TOKEN }} since_tag: ${{ steps.prep.outputs.previous_version }} output: release_notes.md - name: Create Release id: create_release uses: actions/create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref }} commitish: ${{ steps.prep.outputs.release_branch }} release_name: Release ${{ github.ref }} body_path: release_notes.md vagrant-libvirt-0.7.0/.github/workflows/unit-tests.yml000066400000000000000000000100121414232526500230660ustar00rootroot00000000000000# This workflow uses actions that are not certified by GitHub. # They are provided by a third-party and are governed by # separate terms of service, privacy policy, and support # documentation. # This workflow will download a prebuilt Ruby version, install dependencies and run tests with Rake # For more information see: https://github.com/marketplace/actions/setup-ruby-jruby-and-truffleruby name: CI on: push: branches: - master pull_request: jobs: test: runs-on: ubuntu-latest continue-on-error: ${{ matrix.allow_fail }} strategy: fail-fast: false matrix: # need to define one entry with a single entry for each of the options # to allow include to expand the matrix correctly. 
ruby: [2.6.6] vagrant: [main] allow_fail: [true] include: - ruby: 2.2.10 vagrant: v2.0.4 allow_fail: false - ruby: 2.3.5 vagrant: v2.1.5 allow_fail: false - ruby: 2.4.10 vagrant: v2.2.4 allow_fail: false - ruby: 2.6.6 vagrant: v2.2.14 allow_fail: false - ruby: 3.0.0 vagrant: allow_fail: false steps: - uses: actions/checkout@v2 with: fetch-depth: 0 - name: Clone vagrant for ruby 3.0 support if: ${{ matrix.ruby == '3.0.0' }} uses: actions/checkout@v2 with: repository: hashicorp/vagrant path: .deps/vagrant ref: f7973f00edb9438d0b36085f210c80af71cfe5c5 - name: Clone ruby-libvirt for ruby 3.0 support if: ${{ matrix.ruby == '3.0.0' }} uses: actions/checkout@v2 with: repository: libvirt/libvirt-ruby path: .deps/libvirt-ruby ref: 43444be184e4d877c5ce110ee5475c952d7590f7 - name: Set up libvirt run: | sudo apt-get update sudo apt-get install libvirt-dev - uses: actions/cache@v2 with: path: vendor/bundle key: ${{ runner.os }}-gems-${{ hashFiles('**/Gemfile.lock') }} restore-keys: | ${{ runner.os }}-gems- - name: Set up Ruby uses: ruby/setup-ruby@v1 with: ruby-version: ${{ matrix.ruby }} - name: Set up rubygems run: | gem update --system --conservative || (gem i "rubygems-update:~>2.7" --no-document && update_rubygems) gem update bundler --conservative - name: Handle additional ruby 3.0 setup if: ${{ matrix.ruby == '3.0.0' }} run: | # ensure vagrant gemspec allows ruby 3.0 pushd .deps/vagrant/ # ensure main branch exists git checkout -b main sed -i -e 's@s.required_ruby_version.*=.*@s.required_ruby_version = "~> 3.0"@' vagrant.gemspec popd bundle config local.vagrant ${PWD}/.deps/vagrant/ # build gem of latest bindings that contain fix for ruby include path pushd .deps/libvirt-ruby rake gem popd mkdir -p vendor/bundle/ruby/3.0.0/cache/ cp .deps/libvirt-ruby/pkg/ruby-libvirt-*.gem vendor/bundle/ruby/3.0.0/cache/ # need the following to allow the local provided gem to be used instead of the # one from rubygems bundle config set --local disable_checksum_validation true - name: Run bundler using cached path run: | bundle config path vendor/bundle bundle install --jobs 4 --retry 3 env: VAGRANT_VERSION: ${{ matrix.vagrant }} - name: Run tests run: | bundle exec rspec --color --format documentation env: VAGRANT_VERSION: ${{ matrix.vagrant }} - name: Coveralls Parallel uses: coverallsapp/github-action@master with: github-token: ${{ secrets.github_token }} parallel: true path-to-lcov: ./coverage/lcov.info finish: needs: test runs-on: ubuntu-latest steps: - name: Coveralls Finished uses: coverallsapp/github-action@master with: github-token: ${{ secrets.github_token }} parallel-finished: true vagrant-libvirt-0.7.0/.gitignore000066400000000000000000000004601414232526500166250ustar00rootroot00000000000000*.gem *.rbc .bundle .config .yardoc Gemfile.lock InstalledFiles _yardoc bin/ coverage doc/ lib/bundler/man pkg rdoc spec/reports test/tmp test/version_tmp tmp Vagrantfile !tests/*/Vagrantfile !example_box/Vagrantfile .vagrant *.swp .deps # don't commit the generated version lib/vagrant-libvirt/version vagrant-libvirt-0.7.0/Dockerfile000066400000000000000000000034541414232526500166350ustar00rootroot00000000000000# syntax = docker/dockerfile:1.0-experimental ARG VAGRANT_VERSION=2.2.18 FROM ubuntu:bionic as base RUN apt update \ && apt install -y --no-install-recommends \ bash \ ca-certificates \ curl \ git \ gosu \ kmod \ libvirt-bin \ openssh-client \ qemu-utils \ rsync \ && rm -rf /var/lib/apt/lists \ ; RUN mkdir /vagrant ENV VAGRANT_HOME /vagrant ARG VAGRANT_VERSION ENV VAGRANT_VERSION ${VAGRANT_VERSION} RUN set 
-e \ && curl https://releases.hashicorp.com/vagrant/${VAGRANT_VERSION}/vagrant_${VAGRANT_VERSION}_x86_64.deb -o vagrant.deb \ && apt update \ && apt install -y ./vagrant.deb \ && rm -rf /var/lib/apt/lists/* \ && rm -f vagrant.deb \ ; ENV VAGRANT_DEFAULT_PROVIDER=libvirt FROM base as build # allow caching of packages for build RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache RUN sed -i '/deb-src/s/^# //' /etc/apt/sources.list RUN --mount=type=cache,target=/var/cache/apt --mount=type=cache,target=/var/lib/apt \ apt update \ && apt build-dep -y \ vagrant \ ruby-libvirt \ && apt install -y --no-install-recommends \ libxslt-dev \ libxml2-dev \ libvirt-dev \ ruby-bundler \ ruby-dev \ zlib1g-dev \ ; WORKDIR /build COPY . . RUN rake build RUN vagrant plugin install ./pkg/vagrant-libvirt*.gem RUN for dir in boxes data tmp; \ do \ touch /vagrant/${dir}/.remove; \ done \ ; FROM base as slim COPY --from=build /vagrant /vagrant COPY entrypoint.sh /usr/local/bin/ ENTRYPOINT ["entrypoint.sh"] FROM build as final COPY entrypoint.sh /usr/local/bin/ ENTRYPOINT ["entrypoint.sh"] # vim: set expandtab sw=4: vagrant-libvirt-0.7.0/Gemfile000066400000000000000000000030371414232526500161330ustar00rootroot00000000000000# frozen_string_literal: true source 'https://rubygems.org' # Specify your gem's dependencies in vagrant-libvirt.gemspec gemspec group :development do # We depend on Vagrant for development, but we don't add it as a # gem dependency because we expect to be installed within the # Vagrant environment itself using `vagrant plugin`. vagrant_version = ENV['VAGRANT_VERSION'] if !vagrant_version.nil? && !vagrant_version.empty? gem 'vagrant', :git => 'https://github.com/hashicorp/vagrant.git', :ref => vagrant_version else gem 'vagrant', :git => 'https://github.com/hashicorp/vagrant.git', :branch => 'main' end begin raise if vagrant_version.empty? vagrant_version = vagrant_version[1..-1] if vagrant_version && vagrant_version.start_with?('v') vagrant_gem_version = Gem::Version.new(vagrant_version) rescue # default to newer if unable to parse vagrant_gem_version = Gem::Version.new('2.2.8') end vagrant_spec_verison = ENV['VAGRANT_SPEC_VERSION'] if !vagrant_spec_verison.nil? && !vagrant_spec_verison.empty? gem 'vagrant-spec', :github => 'hashicorp/vagrant-spec', :ref => vagrant_spec_verison elsif vagrant_gem_version <= Gem::Version.new('2.2.7') gem 'vagrant-spec', :github => 'hashicorp/vagrant-spec', :ref => '161128f2216cee8edb7bcd30da18bd4dea86f98a' else gem 'vagrant-spec', :github => 'hashicorp/vagrant-spec', :branch => "main" end if Gem::Version.new(RUBY_VERSION) >= Gem::Version.new('3.0.0') gem 'rexml' end gem 'pry' end group :plugins do gemspec end vagrant-libvirt-0.7.0/LICENSE000066400000000000000000000020551414232526500156440ustar00rootroot00000000000000Copyright (c) 2013 Lukas Stanek MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. vagrant-libvirt-0.7.0/README.md000066400000000000000000002551031414232526500161220ustar00rootroot00000000000000# Vagrant Libvirt Provider [![Join the chat at https://gitter.im/vagrant-libvirt/vagrant-libvirt](https://badges.gitter.im/vagrant-libvirt/vagrant-libvirt.svg)](https://gitter.im/vagrant-libvirt/vagrant-libvirt?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![Build Status](https://github.com/vagrant-libvirt/vagrant-libvirt/actions/workflows/unit-tests.yml/badge.svg)](https://github.com/vagrant-libvirt/vagrant-libvirt/actions/workflows/unit-tests.yml) [![Coverage Status](https://coveralls.io/repos/github/vagrant-libvirt/vagrant-libvirt/badge.svg?branch=master)](https://coveralls.io/github/vagrant-libvirt/vagrant-libvirt?branch=master) [![Gem Version](https://badge.fury.io/rb/vagrant-libvirt.svg)](https://badge.fury.io/rb/vagrant-libvirt) This is a [Vagrant](http://www.vagrantup.com) plugin that adds a [Libvirt](http://libvirt.org) provider to Vagrant, allowing Vagrant to control and provision machines via Libvirt toolkit. **Note:** Actual version is still a development one. Feedback is welcome and can help a lot :-) ## Index * [Features](#features) * [Future work](#future-work) * [Using the container image](#using-the-container-image) * [Using Docker](#using-docker) * [Using Podman](#using-podman) * [Extending the Docker image with additional vagrant plugins](#extending-the-docker-image-with-additional-vagrant-plugins) * [Installation](#installation) * [Possible problems with plugin installation on Linux](#possible-problems-with-plugin-installation-on-linux) * [Additional Notes for Fedora and Similar Linux Distributions](#additional-notes-for-fedora-and-similar-linux-distributions) * [Vagrant Project Preparation](#vagrant-project-preparation) * [Add Box](#add-box) * [Create Vagrantfile](#create-vagrantfile) * [Start VM](#start-vm) * [How Project Is Created](#how-project-is-created) * [Libvirt Configuration](#libvirt-configuration) * [Provider Options](#provider-options) * [Domain Specific Options](#domain-specific-options) * [Reload behavior](#reload-behavior) * [Networks](#networks) * [Private Network Options](#private-network-options) * [Public Network Options](#public-network-options) * [Management Network](#management-network) * [Additional Disks](#additional-disks) * [Reload behavior](#reload-behavior-1) * [CDROMs](#cdroms) * [Input](#input) * [PCI device passthrough](#pci-device-passthrough) * [Using USB Devices](#using-usb-devices) * [USB Controller Configuration](#usb-controller-configuration) * [USB Device Passthrough](#usb-device-passthrough) * [USB Redirector Devices](#usb-redirector-devices) * [Filter for USB Redirector Devices](#filter-for-usb-redirector-devices) * [Random number generator passthrough](#random-number-generator-passthrough) * [Serial Console Devices](#serial-console-devices) * [Watchdog device](#watchdog-device) * [Smartcard device](#smartcard-device) * [Hypervisor Features](#hypervisor-features) * [Clock](#clock) * [CPU features](#cpu-features) * [Memory 
Backing](#memory-backing) * [No box and PXE boot](#no-box-and-pxe-boot) * [SSH Access To VM](#ssh-access-to-vm) * [Forwarded Ports](#forwarded-ports) * [Forwarding the ssh-port](#forwarding-the-ssh-port) * [Synced Folders](#synced-folders) * [QEMU Session Support](#qemu-session-support) * [Customized Graphics](#customized-graphics) * [TPM Devices](#tpm-devices) * [Memory balloon](#memory-balloon) * [Libvirt communication channels](#libvirt-communication-channels) * [Custom command line arguments and environment variables](#custom-command-line-arguments-and-environment-variables) * [Box Formats](#box-formats) * [Version 1](#version-1) * [Version 2 (Experimental)](#version-2-experimental) * [Create Box](#create-box) * [Package Box from VM](#package-box-from-vm) * [Troubleshooting VMs](#troubleshooting-vms) * [Development](#development) * [Contributing](#contributing) ## Features * Control local Libvirt hypervisors. * Vagrant `up`, `destroy`, `suspend`, `resume`, `halt`, `ssh`, `reload`, `package` and `provision` commands. * Upload box image (qcow2 format) to Libvirt storage pool. * Create volume as COW diff image for domains. * Create private networks. * Create and boot Libvirt domains. * SSH into domains. * Setup hostname and network interfaces. * Provision domains with any built-in Vagrant provisioner. * Synced folder support via `rsync`, `nfs`, `9p` or `virtiofs`. * Snapshots via [sahara](https://github.com/jedi4ever/sahara). * Package caching via [vagrant-cachier](http://fgrehm.viewdocs.io/vagrant-cachier/). * Use boxes from other Vagrant providers via [vagrant-mutate](https://github.com/sciurus/vagrant-mutate). * Support VMs with no box for PXE boot purposes (Vagrant 1.6 and up) ## Future work * Take a look at [open issues](https://github.com/vagrant-libvirt/vagrant-libvirt/issues?state=open). ## Using the container image Due to the number of issues encountered around compatibility between the ruby runtime environment that is part of the upstream vagrant installation and the library dependencies of libvirt that this project requires to communicate with libvirt, there is a docker image build and published. This should allow users to execute vagrant with vagrant-libvirt without needing to deal with the compatibility issues, though you may need to extend the image for your own needs should you make use of additional plugins. Note the default image contains the full toolchain required to build and install vagrant-libvirt and it's dependencies. There is also a smaller image published with the `-slim` suffix if you just need vagrant-libvirt and don't need to install any additional plugins for your environment. If you are connecting to a remote system libvirt, you may omit the `-v /var/run/libvirt/:/var/run/libvirt/` mount bind. Some distributions patch the local vagrant environment to ensure vagrant-libvirt uses `qemu:///session`, which means you may need to set the environment variable `LIBVIRT_DEFAULT_URI` to the same value if looking to use this in place of your distribution provided installation. ### Using Docker To get the image with the most recent release: ```bash docker pull vagrantlibvirt/vagrant-libvirt:latest ``` --- **Note** If you want the very latest code you can use the `edge` tag instead. 
```bash docker pull vagrantlibvirt/vagrant-libvirt:edge ``` --- Running the image: ```bash docker run -it --rm \ -e LIBVIRT_DEFAULT_URI \ -v /var/run/libvirt/:/var/run/libvirt/ \ -v ~/.vagrant.d:/.vagrant.d \ -v $(realpath "${PWD}"):${PWD} \ -w $(realpath "${PWD}") \ --network host \ vagrantlibvirt/vagrant-libvirt:latest \ vagrant status ``` It's possible to define a function in `~/.bashrc`, for example: ```bash vagrant(){ docker run -it --rm \ -e LIBVIRT_DEFAULT_URI \ -v /var/run/libvirt/:/var/run/libvirt/ \ -v ~/.vagrant.d:/.vagrant.d \ -v $(realpath "${PWD}"):${PWD} \ -w $(realpath "${PWD}") \ --network host \ vagrantlibvirt/vagrant-libvirt:latest \ vagrant $@ } ``` ### Using Podman Preparing the podman run, only once: ```bash mkdir -p ~/.vagrant.d/{boxes,data,tmp} ``` _N.B. This is needed until the entrypoint works for podman to only mount the `~/.vagrant.d` directory_ To run with Podman you need to include ```bash --entrypoint /bin/bash \ --security-opt label=disable \ -v ~/.vagrant.d/boxes:/vagrant/boxes \ -v ~/.vagrant.d/data:/vagrant/data \ -v ~/.vagrant.d/data:/vagrant/tmp \ ``` for example: ```bash vagrant(){ podman run -it --rm \ -e LIBVIRT_DEFAULT_URI \ -v /var/run/libvirt/:/var/run/libvirt/ \ -v ~/.vagrant.d/boxes:/vagrant/boxes \ -v ~/.vagrant.d/data:/vagrant/data \ -v ~/.vagrant.d/data:/vagrant/tmp \ -v $(realpath "${PWD}"):${PWD} \ -w $(realpath "${PWD}") \ --network host \ --entrypoint /bin/bash \ --security-opt label=disable \ docker.io/vagrantlibvirt/vagrant-libvirt:latest \ vagrant $@ } ``` Running Podman in rootless mode maps the root user inside the container to your host user so we need to bypass [entrypoint.sh](https://github.com/vagrant-libvirt/vagrant-libvirt/blob/master/entrypoint.sh) and mount persistent storage directly to `/vagrant`. ### Extending the Docker image with additional vagrant plugins By default the image published and used contains the entire tool chain required to reinstall the vagrant-libvirt plugin and it's dependencies, as this is the default behaviour of vagrant anytime a new plugin is installed. This means it should be possible to use a simple `FROM` statement and ask vagrant to install additional plugins. ``` FROM vagrantlibvirt/vagrant-libvirt:latest RUN vagrant plugin install ``` ## Installation First, you should have both QEMU and Libvirt installed if you plan to run VMs on your local system. For instructions, refer to your Linux distribution's documentation. **NOTE:** Before you start using vagrant-libvirt, please make sure your Libvirt and QEMU installation is working correctly and you are able to create QEMU or KVM type virtual machines with `virsh` or `virt-manager`. Next, you must have [Vagrant installed](http://docs.vagrantup.com/v2/installation/index.html). Vagrant-libvirt supports Vagrant 2.0, 2.1 & 2.2. It should also work with earlier releases from 1.5 onwards but they are not actively tested. Check the [unit tests](https://github.com/vagrant-libvirt/vagrant-libvirt/blob/master/.github/workflows/unit-tests.yml) for the current list of tested versions. *We only test with the upstream version!* If you decide to install your distro's version and you run into problems, as a first step you should switch to upstream. Now you need to make sure your have all the build dependencies installed for vagrant-libvirt. This depends on your distro. 
An overview: * Ubuntu 18.10, Debian 9 and up: ```shell apt-get build-dep vagrant ruby-libvirt apt-get install qemu libvirt-daemon-system libvirt-clients ebtables dnsmasq-base apt-get install libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev apt-get install libguestfs-tools ``` * Ubuntu 18.04, Debian 8 and older: ```shell apt-get build-dep vagrant ruby-libvirt apt-get install qemu libvirt-bin ebtables dnsmasq-base apt-get install libxslt-dev libxml2-dev libvirt-dev zlib1g-dev ruby-dev apt-get install libguestfs-tools ``` (It is possible some users will already have libraries from the third line installed, but this is the way to make it work OOTB.) * CentOS 6, 7, Fedora 21: ```shell yum install qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm libguestfs-tools ``` * Fedora 22 and up: ```shell dnf install -y gcc libvirt libvirt-devel libxml2-devel make ruby-devel libguestfs-tools ``` * OpenSUSE leap 15.1: ```shell zypper install qemu libvirt libvirt-devel ruby-devel gcc qemu-kvm libguestfs ``` * Arch Linux: please read the related [ArchWiki](https://wiki.archlinux.org/index.php/Vagrant#vagrant-libvirt) page. ```shell pacman -S vagrant ``` Now you're ready to install vagrant-libvirt using standard [Vagrant plugin](http://docs.vagrantup.com/v2/plugins/usage.html) installation methods. For some distributions you will need to specify `CONFIGURE_ARGS` variable before running `vagrant plugin install`: * Fedora 32 + upstream Vagrant: ```shell export CONFIGURE_ARGS="with-libvirt-include=/usr/include/libvirt with-libvirt-lib=/usr/lib64" ``` ```shell vagrant plugin install vagrant-libvirt ``` ### Possible problems with plugin installation on Linux In case of problems with building nokogiri and ruby-libvirt gem, install missing development libraries for libxslt, libxml2 and libvirt. On Ubuntu, Debian, make sure you are running all three of the `apt` commands above with `sudo`. On RedHat, Centos, Fedora, ... ```shell $ sudo dnf install libxslt-devel libxml2-devel libvirt-devel ruby-devel gcc ``` On Arch Linux it is recommended to follow [steps from ArchWiki](https://wiki.archlinux.org/index.php/Vagrant#vagrant-libvirt). If have problem with installation - check your linker. It should be `ld.gold`: ```shell sudo alternatives --set ld /usr/bin/ld.gold # OR sudo ln -fs /usr/bin/ld.gold /usr/bin/ld ``` If you have issues building ruby-libvirt, try the following: ```shell CONFIGURE_ARGS='with-ldflags=-L/opt/vagrant/embedded/lib with-libvirt-include=/usr/include/libvirt with-libvirt-lib=/usr/lib' GEM_HOME=~/.vagrant.d/gems GEM_PATH=$GEM_HOME:/opt/vagrant/embedded/gems PATH=/opt/vagrant/embedded/bin:$PATH vagrant plugin install vagrant-libvirt ``` ### Additional Notes for Fedora and Similar Linux Distributions If you encounter the following load error when using the vagrant-libvirt plugin (note the required by libssh): ```shell /opt/vagrant/embedded/lib/ruby/2.4.0/rubygems/core_ext/kernel_require.rb:55:in `require': /opt/vagrant/embedded/lib64/libcrypto.so.1.1: version `OPENSSL_1_1_1b' not found (required by /lib64/libssh.so.4) - /home/xxx/.vagrant.d/gems/2.4.6/gems/ruby-libvirt-0.7.1/lib/_libvirt.so (LoadError) ``` then the following steps have been found to resolve the problem. Thanks to James Reynolds (see https://github.com/hashicorp/vagrant/issues/11020#issuecomment-540043472). The specific version of libssh will change over time so references to the rpm in the commands below will need to be adjusted accordingly. 
```shell # Fedora dnf download --source libssh # centos 8 stream, doesn't provide source RPMs, so you need to download like so git clone https://git.centos.org/centos-git-common # centos-git-common needs its tools in PATH export PATH=$(readlink -f ./centos-git-common):$PATH git clone https://git.centos.org/rpms/libssh cd libssh git checkout imports/c8s/libssh-0.9.4-1.el8 into_srpm.sh -d c8s cd SRPMS # common commands (make sure to adjust verison accordingly) rpm2cpio libssh-0.9.0-5.fc30.src.rpm | cpio -imdV tar xf libssh-0.9.0.tar.xz mkdir build cd build cmake ../libssh-0.9.0 -DOPENSSL_ROOT_DIR=/opt/vagrant/embedded/ make sudo cp lib/libssh* /opt/vagrant/embedded/lib64 ``` If you encounter the following load error when using the vagrant-libvirt plugin (note the required by libk5crypto): ```shell /opt/vagrant/embedded/lib/ruby/2.4.0/rubygems/core_ext/kernel_require.rb:55:in `require': /usr/lib64/libk5crypto.so.3: undefined symbol: EVP_KDF_ctrl, version OPENSSL_1_1_1b - /home/rbelgrave/.vagrant.d/gems/2.4.9/gems/ruby-libvirt-0.7.1/lib/_libvirt.so (LoadError) ``` then the following steps have been found to resolve the problem. After the steps below are complete, then reinstall the vagrant-libvirt plugin without setting the `CONFIGURE_ARGS`. Thanks to Marco Bevc (see https://github.com/hashicorp/vagrant/issues/11020#issuecomment-625801983): ```shell # Fedora dnf download --source krb5-libs # centos 8 stream, doesn't provide source RPMs, so you need to download like so git clone https://git.centos.org/centos-git-common # centos-git-common needs its tools in PATH export PATH=$(readlink -f ./centos-git-common):$PATH git clone https://git.centos.org/rpms/krb5 cd krb5 git checkout imports/c8s/krb5-1.18.2-8.el8 into_srpm.sh -d c8s cd SRPMS # common commands (make sure to adjust verison accordingly) rpm2cpio krb5-1.18-1.fc32.src.rpm | cpio -imdV tar xf krb5-1.18.tar.gz cd krb5-1.18/src ./configure make sudo cp -P lib/crypto/libk5crypto.* /opt/vagrant/embedded/lib64/ ``` ## Vagrant Project Preparation ### Add Box After installing the plugin (instructions above), the quickest way to get started is to add Libvirt box and specify all the details manually within a `config.vm.provider` block. So first, add Libvirt box using any name you want. You can find more Libvirt-ready boxes at [Vagrant Cloud](https://app.vagrantup.com/boxes/search?provider=libvirt). For example: ```shell vagrant init fedora/32-cloud-base ``` ### Create Vagrantfile And then make a Vagrantfile that looks like the following, filling in your information where necessary. For example: ```ruby Vagrant.configure("2") do |config| config.vm.define :test_vm do |test_vm| test_vm.vm.box = "fedora/32-cloud-base" end end ``` ### Start VM In prepared project directory, run following command: ```shell $ vagrant up --provider=libvirt ``` Vagrant needs to know that we want to use Libvirt and not default VirtualBox. That's why there is `--provider=libvirt` option specified. Other way to tell Vagrant to use Libvirt provider is to setup environment variable ```shell export VAGRANT_DEFAULT_PROVIDER=libvirt ``` ### How Project Is Created Vagrant goes through steps below when creating new project: 1. Connect to Libvirt locally or remotely via SSH. 2. Check if box image is available in Libvirt storage pool. If not, upload it to remote Libvirt storage pool as new volume. 3. Create COW diff image of base box image for new Libvirt domain. 4. Create and start new domain on Libvirt host. 5. Check for DHCP lease from dnsmasq server. 6. Wait till SSH is available. 
7. Sync folders and run Vagrant provisioner on new domain if set up in the Vagrantfile.

### Libvirt Configuration

### Provider Options

Although it should work without any configuration for most people, this provider exposes quite a few provider-specific configuration options. The following options allow you to configure how vagrant-libvirt connects to Libvirt, and are used to generate the [Libvirt connection URI](http://libvirt.org/uri.html):

* `driver` - The hypervisor to access. Currently only KVM and QEMU are supported.
* `host` - The name of the server where libvirtd is running.
* `connect_via_ssh` - Whether to use an SSH tunnel to connect to Libvirt. Required to access Libvirt on a remote host; without it the provider will not be able to get the IP address of a started VM.
* `username` - Username to access Libvirt.
* `password` - Password to access Libvirt.
* `id_ssh_key_file` - If not nil, uses this ssh private key to access Libvirt. Default is `$HOME/.ssh/id_rsa`. Prepends `$HOME/.ssh/` if no directory is given.
* `socket` - Path to the Libvirt unix socket (e.g. `/var/run/libvirt/libvirt-sock`).
* `proxy_command` - For advanced usage. When connecting to remote libvirt instances, if the default constructed proxy\_command which uses `-W %h:%p` does not work, set this as needed. It performs interpolation using `{key}` and supports only `{host}`, `{username}`, and `{id_ssh_key_file}`. This is to try and avoid issues with escaping `%` and `$` which might be necessary to the ssh command itself. e.g.: `libvirt.proxy_command = "ssh {host} -l {username} -i {id_ssh_key_file} nc %h %p"`
* `uri` - For advanced usage. Directly specifies what Libvirt connection URI vagrant-libvirt should use. Overrides all other connection configuration options.

In the event that none of these are set (excluding the `driver` option) the provider will attempt to retrieve the URI from the environment variable `LIBVIRT_DEFAULT_URI`, similar to how virsh works. If any of them are set, it will ignore the environment variable. The reason the driver option is ignored is that it is not uncommon for it to be explicitly set on the box itself, and there is no easy way to determine whether it is being set by the user or the box packager.

Connection-independent options:

* `storage_pool_name` - Libvirt storage pool name, where box image and instance snapshots (if `snapshot_pool_name` is not set) will be stored.
* `snapshot_pool_name` - Libvirt storage pool name. If set, the created snapshot of the instance will be stored at this location instead of `storage_pool_name`.

For example:

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    libvirt.host = "example.com"
  end
end
```

### Domain Specific Options

* `title` - A short description of the domain.
* `description` - A human readable description of the virtual machine.
* `disk_bus` - The type of disk device to emulate. Defaults to virtio if not set. Possible values are documented in Libvirt's [description for _target_](http://libvirt.org/formatdomain.html#elementsDisks). NOTE: this option applies only to disks associated with a box image. To set the bus type on additional disks, see the [Additional Disks](#additional-disks) section.
* `disk_device` - The disk device to emulate. Defaults to vda if not set, which should be fine for paravirtualized guests, but some fully virtualized guests may require hda. NOTE: this option also applies only to disks associated with a box image.
* `disk_driver` - Extra options for the main disk driver ([see Libvirt documentation](http://libvirt.org/formatdomain.html#elementsDisks)). NOTE: this option also applies only to disks associated with a box image. In all cases, the value `nil` can be used to force the hypervisor default behaviour (e.g. to override settings defined in top-level Vagrantfiles). Supported options include: * `:cache` - Controls the cache mechanism. Possible values are "default", "none", "writethrough", "writeback", "directsync" and "unsafe". * `:io` - Controls specific policies on I/O. Possible values are "threads" and "native". * `:copy_on_read` - Controls whether to copy read backing file into the image file. The value can be either "on" or "off". * `:discard` - Controls whether discard requests (also known as "trim" or "unmap") are ignored or passed to the filesystem. Possible values are "unmap" or "ignore". Note: for discard to work, you will likely also need to set `disk_bus = 'scsi'` * `:detect_zeroes` - Controls whether to detect zero write requests. The value can be "off", "on" or "unmap". * `nic_model_type` - parameter specifies the model of the network adapter when you create a domain value by default virtio KVM believe possible values, see the [documentation for Libvirt](https://libvirt.org/formatdomain.html#elementsNICSModel). * `shares` - Proportional weighted share for the domain relative to others. For more details see [documentation](https://libvirt.org/formatdomain.html#elementsCPUTuning). * `memory` - Amount of memory in MBytes. Defaults to 512 if not set. * `cpus` - Number of virtual cpus. Defaults to 1 if not set. * `cpuset` - Physical cpus to which the vcpus can be pinned. For more details see [documentation](https://libvirt.org/formatdomain.html#elementsCPUAllocation). * `cputopology` - Number of CPU sockets, cores and threads running per core. All fields of `:sockets`, `:cores` and `:threads` are mandatory, `cpus` domain option must be present and must be equal to total count of **sockets * cores * threads**. For more details see [documentation](https://libvirt.org/formatdomain.html#elementsCPU). * `nodeset` - Physical NUMA nodes where virtual memory can be pinned. For more details see [documentation](https://libvirt.org/formatdomain.html#elementsNUMATuning). ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.cpus = 4 libvirt.cpuset = '1-4,^3,6' libvirt.cputopology :sockets => '2', :cores => '2', :threads => '1' end end ``` * `nested` - [Enable nested virtualization](https://docs.fedoraproject.org/en-US/quick-docs/using-nested-virtualization-in-kvm/). Default is false. * `cpu_mode` - [CPU emulation mode](https://libvirt.org/formatdomain.html#elementsCPU). Defaults to 'host-model' if not set. Allowed values: host-model, host-passthrough, custom. * `cpu_model` - CPU Model. Defaults to 'qemu64' if not set and `cpu_mode` is `custom` and to '' otherwise. This can really only be used when setting `cpu_mode` to `custom`. * `cpu_fallback` - Whether to allow Libvirt to fall back to a CPU model close to the specified model if features in the guest CPU are not supported on the host. Defaults to 'allow' if not set. Allowed values: `allow`, `forbid`. * `numa_nodes` - Specify an array of NUMA nodes for the guest. The syntax is similar to what would be set in the domain XML. `memory` must be in MB. Symmetrical and asymmetrical topologies are supported but make sure your total count of defined CPUs adds up to `v.cpus`. 
The sum of all the memory defined here will act as your total memory for your guest VM. **This sum will override what is set in `v.memory`** ``` v.cpus = 4 v.numa_nodes = [ {:cpus => "0-1", :memory => "1024"}, {:cpus => "2-3", :memory => "4096"} ] ``` * `loader` - Sets path to custom UEFI loader. * `kernel` - To launch the guest with a kernel residing on host filesystems. Equivalent to qemu `-kernel`. * `initrd` - To specify the initramfs/initrd to use for the guest. Equivalent to qemu `-initrd`. * `random_hostname` - To create a domain name with extra information on the end to prevent hostname conflicts. * `default_prefix` - The default Libvirt guest name becomes a concatenation of the `_`. The current working directory is the default prefix to the guest name. The `default_prefix` options allow you to set the guest name prefix. * `cmd_line` - Arguments passed on to the guest kernel initramfs or initrd to use. Equivalent to qemu `-append`, only possible to use in combination with `initrd` and `kernel`. * `graphics_type` - Sets the protocol used to expose the guest display. Defaults to `vnc`. Possible values are "sdl", "curses", "none", "gtk", "vnc" or "spice". * `graphics_port` - Sets the port for the display protocol to bind to. Defaults to 5900. * `graphics_ip` - Sets the IP for the display protocol to bind to. Defaults to "127.0.0.1". * `graphics_passwd` - Sets the password for the display protocol. Working for vnc and Spice. by default working without passsword. * `graphics_autoport` - Sets autoport for graphics, Libvirt in this case ignores graphics_port value, Defaults to 'yes'. Possible value are "yes" and "no" * `graphics_gl` - Set to `true` to enable OpenGL. Defaults to `true` if `video_accel3d` is `true`. * `keymap` - Set keymap for vm. default: en-us * `kvm_hidden` - [Hide the hypervisor from the guest](https://libvirt.org/formatdomain.html#elementsFeatures). Useful for [GPU passthrough](#pci-device-passthrough) on stubborn drivers. Default is false. * `video_type` - Sets the graphics card type exposed to the guest. Defaults to "cirrus". [Possible values](http://libvirt.org/formatdomain.html#elementsVideo) are "vga", "cirrus", "vmvga", "xen", "vbox", or "qxl". * `video_vram` - Used by some graphics card types to vary the amount of RAM dedicated to video. Defaults to 9216. * `video_accel3d` - Set to `true` to enable 3D acceleration. Defaults to `false`. * `sound_type` - [Set the virtual sound card](https://libvirt.org/formatdomain.html#elementsSound) Defaults to "ich6". * `machine_type` - Sets machine type. Equivalent to qemu `-machine`. Use `qemu-system-x86_64 -machine help` to get a list of supported machines. * `machine_arch` - Sets machine architecture. This helps Libvirt to determine the correct emulator type. Possible values depend on your version of QEMU. For possible values, see which emulator executable `qemu-system-*` your system provides. Common examples are `aarch64`, `alpha`, `arm`, `cris`, `i386`, `lm32`, `m68k`, `microblaze`, `microblazeel`, `mips`, `mips64`, `mips64el`, `mipsel`, `moxie`, `or32`, `ppc`, `ppc64`, `ppcemb`, `s390x`, `sh4`, `sh4eb`, `sparc`, `sparc64`, `tricore`, `unicore32`, `x86_64`, `xtensa`, `xtensaeb`. * `machine_virtual_size` - Sets the disk size in GB for the machine overriding the default specified in the box. Allows boxes to defined with a minimal size disk by default and to be grown to a larger size at creation time. Will ignore sizes smaller than the size specified by the box metadata. 
Note that currently there is no support for automatically resizing the filesystem to take advantage of the larger disk. * `emulator_path` - Explicitly select which device model emulator to use by providing the path, e.g. `/usr/bin/qemu-system-x86_64`. This is especially useful on systems that fail to select it automatically based on `machine_arch` which then results in a capability error. * `boot` - Change the boot order and enables the boot menu. Possible options are "hd", "network", "cdrom". Defaults to "hd" with boot menu disabled. When "network" is set without "hd", only all NICs will be tried; see below for more detail. * `nic_adapter_count` - Defaults to '8'. Only use case for increasing this count is for VMs that virtualize switches such as Cumulus Linux. Max value for Cumulus Linux VMs is 33. * `uuid` - Force a domain UUID. Defaults to autogenerated value by Libvirt if not set. * `suspend_mode` - What is done on vagrant suspend. Possible values: 'pause', 'managedsave'. Pause mode executes a la `virsh suspend`, which just pauses execution of a VM, not freeing resources. Managed save mode does a la `virsh managedsave` which frees resources suspending a domain. * `tpm_model` - The model of the TPM to which you wish to connect. * `tpm_type` - The type of TPM device to which you are connecting. * `tpm_path` - The path to the TPM device on the host system. * `tpm_version` - The TPM version to use. * `dtb` - The device tree blob file, mostly used for non-x86 platforms. In case the device tree isn't added in-line to the kernel, it can be manually specified here. * `autostart` - Automatically start the domain when the host boots. Defaults to 'false'. * `channel` - [Libvirt channels](https://libvirt.org/formatdomain.html#elementCharChannel). Configure a private communication channel between the host and guest, e.g. for use by the [QEMU guest agent](http://wiki.libvirt.org/page/Qemu_guest_agent) and the Spice/QXL graphics type. * `mgmt_attach` - Decide if VM has interface in mgmt network. If set to 'false' it is not possible to communicate with VM through `vagrant ssh` or run provisioning. Setting to 'false' is only possible when VM doesn't use box. Defaults set to 'true'. * `serial` - [libvirt serial devices](https://libvirt.org/formatdomain.html#elementsConsole). Configure a serial/console port to communicate with the guest. Can be used to log to file boot time messages sent to ttyS0 console by the guest. Specific domain settings can be set for each domain separately in multi-VM environment. Example below shows a part of Vagrantfile, where specific options are set for dbserver domain. ```ruby Vagrant.configure("2") do |config| config.vm.define :dbserver do |dbserver| dbserver.vm.box = "centos64" dbserver.vm.provider :libvirt do |domain| domain.memory = 2048 domain.cpus = 2 domain.nested = true domain.disk_driver :cache => 'none' end end # ... ``` The following example shows part of a Vagrantfile that enables the VM to boot from a network interface first and a hard disk second. This could be used to run VMs that are meant to be a PXE booted machines. Be aware that if `hd` is not specified as a boot option, it will never be tried. ```ruby Vagrant.configure("2") do |config| config.vm.define :pxeclient do |pxeclient| pxeclient.vm.box = "centos64" pxeclient.vm.provider :libvirt do |domain| domain.boot 'network' domain.boot 'hd' end end # ... 
``` #### Reload behavior On `vagrant reload` the following domain specific attributes are updated in defined domain: * `disk_bus` - Is updated only on disks. It skips CDROMs * `nic_model_type` - Updated * `memory` - Updated * `cpus` - Updated * `nested` - Updated * `cpu_mode` - Updated. Pay attention that custom mode is not supported * `graphics_type` - Updated * `graphics_port` - Updated * `graphics_ip` - Updated * `graphics_passwd` - Updated * `graphics_autoport` - Updated * `keymap` - Updated * `video_type` - Updated * `video_vram` - Updated * `tpm_model` - Updated * `tpm_type` - Updated * `tpm_path` - Updated * `tpm_version` - Updated ## Networks Networking features in the form of `config.vm.network` support private networks concept. It supports both the virtual network switch routing types and the point to point Guest OS to Guest OS setting using UDP/Mcast/TCP tunnel interfaces. http://wiki.libvirt.org/page/VirtualNetworking https://libvirt.org/formatdomain.html#elementsNICSTCP http://libvirt.org/formatdomain.html#elementsNICSMulticast http://libvirt.org/formatdomain.html#elementsNICSUDP _(in Libvirt v1.2.20 and higher)_ Public Network interfaces are currently implemented using the macvtap driver. The macvtap driver is only available with the Linux Kernel version >= 2.6.24. See the following Libvirt documentation for the details of the macvtap usage. http://www.libvirt.org/formatdomain.html#elementsNICSDirect An examples of network interface definitions: ```ruby # Private network using virtual network switching config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :ip => "10.20.30.40" end # Private network using DHCP and a custom network config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :type => "dhcp", :libvirt__network_address => '10.20.30.0' end # Private network (as above) using a domain name config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :ip => "10.20.30.40", :libvirt__domain_name => "test.local" end # Private network. Point to Point between 2 Guest OS using a TCP tunnel # Guest 1 config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :libvirt__tunnel_type => 'server', # default is 127.0.0.1 if omitted # :libvirt__tunnel_ip => '127.0.0.1', :libvirt__tunnel_port => '11111' # network with ipv6 support test_vm1.vm.network :private_network, :ip => "10.20.5.42", :libvirt__guest_ipv6 => "yes", :libvirt__ipv6_address => "2001:db8:ca2:6::1", :libvirt__ipv6_prefix => "64" # Guest 2 config.vm.define :test_vm2 do |test_vm2| test_vm2.vm.network :private_network, :libvirt__tunnel_type => 'client', # default is 127.0.0.1 if omitted # :libvirt__tunnel_ip => '127.0.0.1', :libvirt__tunnel_port => '11111' # network with ipv6 support test_vm2.vm.network :private_network, :ip => "10.20.5.45", :libvirt__guest_ipv6 => "yes", :libvirt__ipv6_address => "2001:db8:ca2:6::1", :libvirt__ipv6_prefix => "64" # Public Network config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :public_network, :dev => "virbr0", :mode => "bridge", :type => "bridge" end ``` In example below, one network interface is configured for VM `test_vm1`. After you run `vagrant up`, VM will be accessible on IP address `10.20.30.40`. So if you install a web server via provisioner, you will be able to access your testing server on `http://10.20.30.40` URL. But beware that this address is private to Libvirt host only. It's not visible outside of the hypervisor box. 
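To verify what was actually created on the hypervisor side, the stock libvirt tooling can be used. This is only a quick sketch; the exact network name reported by `net-list` depends on your Vagrantfile:

```shell
# List all Libvirt networks known to the hypervisor, including inactive ones
virsh net-list --all
# Dump the XML definition (address range, DHCP settings) of a given network
virsh net-dumpxml <network-name>
```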
If network `10.20.30.0/24` doesn't exist, provider will create it. By default created networks are NATed to outside world, so your VM will be able to connect to the internet (if hypervisor can). And by default, DHCP is offering addresses on newly created networks. The second interface is created and bridged into the physical device `eth0`. This mechanism uses the macvtap Kernel driver and therefore does not require an existing bridge device. This configuration assumes that DHCP and DNS services are being provided by the public network. This public interface should be reachable by anyone with access to the public network. ### Private Network Options *Note: These options are not applicable to public network interfaces.* There is a way to pass specific options for Libvirt provider when using `config.vm.network` to configure new network interface. Each parameter name starts with `libvirt__` string. Here is a list of those options: * `:libvirt__network_name` - Name of Libvirt network to connect to. By default, network 'default' is used. * `:libvirt__netmask` - Used only together with `:ip` option. Default is '255.255.255.0'. * `:libvirt__network_address` - Used only when `:type` is set to `dhcp`. Only `/24` subnet is supported. Default is `172.28.128.0`. * `:libvirt__host_ip` - Address to use for the host (not guest). Default is first possible address (after network address). * `:libvirt__domain_name` - DNS domain of the DHCP server. Used only when creating new network. * `:libvirt__dhcp_enabled` - If DHCP will offer addresses, or not. Used only when creating new network. Default is true. * `:libvirt__dhcp_start` - First address given out via DHCP. Default is third address in range (after network name and gateway). * `:libvirt__dhcp_stop` - Last address given out via DHCP. Default is last possible address in range (before broadcast address). * `:libvirt__dhcp_bootp_file` - The file to be used for the boot image. Used only when dhcp is enabled. * `:libvirt__dhcp_bootp_server` - The server that runs the DHCP server. Used only when dhcp is enabled.By default is the same host that runs the DHCP server. * `:libvirt__adapter` - Number specifiyng sequence number of interface. * `:libvirt__forward_mode` - Specify one of `veryisolated`, `none`, `open`, `nat` or `route` options. This option is used only when creating new network. Mode `none` will create isolated network without NATing or routing outside. You will want to use NATed forwarding typically to reach networks outside of hypervisor. Routed forwarding is typically useful to reach other networks within hypervisor. `veryisolated` described [here](https://libvirt.org/formatnetwork.html#examplesNoGateway). By default, option `nat` is used. * `:libvirt__forward_device` - Name of interface/device, where network should be forwarded (NATed or routed). Used only when creating new network. By default, all physical interfaces are used. * `:libvirt__tunnel_type` - Set to 'udp' if using UDP unicast tunnel mode (libvirt v1.2.20 or higher). Set this to either "server" or "client" for tcp tunneling. Set this to 'mcast' if using multicast tunneling. This configuration type uses tunnels to generate point to point connections between Guests. Useful for Switch VMs like Cumulus Linux. No virtual switch setting like `libvirt__network_name` applies with tunnel interfaces and will be ignored if configured. * `:libvirt__tunnel_ip` - Sets the source IP of the Libvirt tunnel interface. 
By default this is `127.0.0.1` for TCP and UDP tunnels and `239.255.1.1` for Multicast tunnels. It populates the address field in the `` of the interface xml configuration. * `:libvirt__tunnel_port` - Sets the source port the tcp/udp/mcast tunnel with use. This port information is placed in the `` section of interface xml configuration. * `:libvirt__tunnel_local_port` - Sets the local port used by the udp tunnel interface type. It populates the port field in the `` section of the interface xml configuration. _(This feature only works in Libvirt 1.2.20 and higher)_ * `:libvirt__tunnel_local_ip` - Sets the local IP used by the udp tunnel interface type. It populates the ip entry of the `` section of the interface xml configuration. _(This feature only works in Libvirt 1.2.20 and higher)_ * `:libvirt__guest_ipv6` - Enable or disable guest-to-guest IPv6 communication. See [here](https://libvirt.org/formatnetwork.html#examplesPrivate6), and [here](http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=705e67d40b09a905cd6a4b8b418d5cb94eaa95a8) for for more information. *Note: takes either 'yes' or 'no' for value* * `:libvirt__ipv6_address` - Define ipv6 address, require also prefix. * `:libvirt__ipv6_prefix` - Define ipv6 prefix. generate string `` * `:libvirt__iface_name` - Define a name for the private network interface. With this feature one can [simulate physical link failures](https://github.com/vagrant-libvirt/vagrant-libvirt/pull/498) * `:mac` - MAC address for the interface. *Note: specify this in lowercase since Vagrant network scripts assume it will be!* * `:libvirt__mtu` - MTU size for the Libvirt network, if not defined, the created network will use the Libvirt default (1500). VMs still need to set the MTU accordingly. * `:model_type` - parameter specifies the model of the network adapter when you create a domain value by default virtio KVM believe possible values, see the documentation for Libvirt * `:libvirt__driver_name` - Define which network driver to use. [More info](https://libvirt.org/formatdomain.html#elementsDriverBackendOptions) * `:libvirt__driver_queues` - Define a number of queues to be used for network interface. Set equal to numer of vCPUs for best performance. [More info](http://www.linux-kvm.org/page/Multiqueue) * `:autostart` - Automatic startup of network by the Libvirt daemon. If not specified the default is 'false'. * `:bus` - The bus of the PCI device. Both :bus and :slot have to be defined. * `:slot` - The slot of the PCI device. Both :bus and :slot have to be defined. * `:libvirt__always_destroy` - Allow domains that use but did not create a network to destroy it when the domain is destroyed (default: `true`). Set to `false` to only allow the domain that created the network to destroy it. When the option `:libvirt__dhcp_enabled` is to to 'false' it shouldn't matter whether the virtual network contains a DHCP server or not and vagrant-libvirt should not fail on it. The only situation where vagrant-libvirt should fail is when DHCP is requested but isn't configured on a matching already existing virtual network. ### Public Network Options * `:dev` - Physical device that the public interface should use. Default is 'eth0'. * `:mode` - The mode in which the public interface should operate in. Supported modes are available from the [libvirt documentation](http://www.libvirt.org/formatdomain.html#elementsNICSDirect). Default mode is 'bridge'. * `:type` - is type of interface.(``) * `:mac` - MAC address for the interface. 
* `:network_name` - Name of Libvirt network to connect to. * `:portgroup` - Name of Libvirt portgroup to connect to. * `:ovs` - Support to connect to an Open vSwitch bridge device. Default is 'false'. * :ovs_interfaceid - Add Open vSwitch 'interfaceid' parameter. * `:trust_guest_rx_filters` - Support trustGuestRxFilters attribute. Details are listed [here](http://www.libvirt.org/formatdomain.html#elementsNICSDirect). Default is 'false'. ### Management Network vagrant-libvirt uses a private network to perform some management operations on VMs. All VMs will have an interface connected to this network and an IP address dynamically assigned by Libvirt unless you set `:mgmt_attach` to 'false'. This is in addition to any networks you configure. The name and address used by this network are configurable at the provider level. * `management_network_name` - Name of Libvirt network to which all VMs will be connected. If not specified the default is 'vagrant-libvirt'. * `management_network_address` - Address of network to which all VMs will be connected. Must include the address and subnet mask. If not specified the default is '192.168.121.0/24'. * `management_network_mode` - Network mode for the Libvirt management network. Specify one of veryisolated, none, open, nat or route options. Further documented under [Private Networks](#private-network-options) * `management_network_guest_ipv6` - Enable or disable guest-to-guest IPv6 communication. See [here](https://libvirt.org/formatnetwork.html#examplesPrivate6), and [here](http://libvirt.org/git/?p=libvirt.git;a=commitdiff;h=705e67d40b09a905cd6a4b8b418d5cb94eaa95a8) for for more information. * `management_network_autostart` - Automatic startup of mgmt network, if not specified the default is 'false'. * `management_network_pci_bus` - The bus of the PCI device. * `management_network_pci_slot` - The slot of the PCI device. * `management_network_mac` - MAC address of management network interface. * `management_network_domain` - Domain name assigned to the management network. * `management_network_mtu` - MTU size of management network. If not specified, the Libvirt default (1500) will be used. You may wonder how vagrant-libvirt knows the IP address a VM received. Libvirt doesn't provide a standard way to find out the IP address of a running domain. But we do know the MAC address of the virtual machine's interface on the management network. Libvirt is closely connected with dnsmasq, which acts as a DHCP server. dnsmasq writes lease information in the `/var/lib/libvirt/dnsmasq` directory. Vagrant-libvirt looks for the MAC address in this file and extracts the corresponding IP address. It is also possible to use the Qemu Agent to extract the management interface configuration from the booted virtual machine. This is helpful in libvirt environments where no local dnsmasq is used for automatic address assigment, but external dhcp services via bridged libvirt networks. Prerequisite is to enable the qemu agent channel via ([Libvirt communication channels](#libvirt-communication-channels)) and the virtual machine image must have the agent pre-installed before deploy. The agent will start automatically if it detects an attached channel during boot. * `qemu_use_agent` - false by default, if set to true, attempt to extract configured ip address via qemu agent. To use the management network interface with an external dhcp service you need to setup a bridged host network manually and define it via `management_network_name` in your Vagrantfile. 
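As a rough sketch of how these provider-level options fit together, the following fragment renames the management network and enables the QEMU agent based address lookup described above; the network name and address shown are only placeholders:

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    # Illustrative values only -- pick a name/address that suits your host
    libvirt.management_network_name = "vagrant-libvirt-mgmt"
    libvirt.management_network_address = "192.168.124.0/24"
    # Ask the QEMU guest agent for the address instead of the dnsmasq leases
    # (requires the agent channel and a box with the agent pre-installed)
    libvirt.qemu_use_agent = true
  end
end
```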
## Additional Disks You can create and attach additional disks to a VM via `libvirt.storage :file`. It has a number of options: * `path` - Location of the disk image. If unspecified, a path is automtically chosen in the same storage pool as the VMs primary disk. * `device` - Name of the device node the disk image will have in the VM, e.g. *vdb*. If unspecified, the next available device is chosen. * `size` - Size of the disk image. If unspecified, defaults to 10G. * `type` - Type of disk image to create. Defaults to *qcow2*. * `bus` - Type of bus to connect device to. Defaults to *virtio*. * `allow_existing` - Set to true if you want to allow the VM to use a pre-existing disk. If the disk doesn't exist it will be created. Disks with this option set to true need to be removed manually. * `shareable` - Set to true if you want to simulate shared SAN storage. * `serial` - Serial number of the disk device. * `wwn` - WWN number of the disk device. The following disk performance options can also be configured (see the [libvirt documentation for possible values](http://libvirt.org/formatdomain.html#elementsDisks) or [here](https://www.suse.com/documentation/sles11/book_kvm/data/sect1_chapter_book_kvm.html) for a fuller explanation). In all cases, the options use the hypervisor default if not specified, or if set to `nil`. * `cache` - Cache mode to use. Value may be `default`, `none`, `writeback`, `writethrough`, `directsync` or `unsafe`. * `io` - Controls specific policies on I/O. Value may be `threads` or `native`. * `copy_on_read` - Controls whether to copy read backing file into the image file. Value may be `on` or `off`. * `discard` - Controls whether discard requests (also known as "trim" or "unmap") are ignored or passed to the filesystem. Value may be `unmap` or `ignore`. Note: for discard to work, you will likely also need to set `:bus => 'scsi'` * `detect_zeroes` - Controls whether to detect zero write requests. Value may be `off`, `on` or `unmap`. The following example creates two additional disks. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :size => '20G' libvirt.storage :file, :size => '40G', :bus => 'scsi', :type => 'raw', :discard => 'unmap', :detect_zeroes => 'on' end end ``` For shared SAN storage to work the following example can be used: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :size => '20G', :path => 'my_shared_disk.img', :allow_existing => true, :shareable => true, :type => 'raw' end end ``` ### Reload behavior On `vagrant reload` the following additional disk attributes are updated in defined domain: * `bus` - Updated. Uses `device` as a search marker. It is not required to define `device`, but it's recommended. If `device` is defined then the order of addtitional disk definition becomes irrelevant. ## CDROMs You can attach up to four CDROMs to a VM via `libvirt.storage :file, :device => :cdrom`. Available options are: * `path` - The path to the iso to be used for the CDROM drive. * `dev` - The device to use (`hda`, `hdb`, `hdc`, or `hdd`). This will be automatically determined if unspecified. * `bus` - The bus to use for the CDROM drive. 
Defaults to `ide` The following example creates three CDROM drives in the VM: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :device => :cdrom, :path => '/path/to/iso1.iso' libvirt.storage :file, :device => :cdrom, :path => '/path/to/iso2.iso' libvirt.storage :file, :device => :cdrom, :path => '/path/to/iso3.iso' end end ``` ## Input You can specify multiple inputs to the VM via `libvirt.input`. Available options are listed below. Note that both options are required: * `type` - The type of the input * `bus` - The bus of the input ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # this is the default # libvirt.input :type => "mouse", :bus => "ps2" # very useful when having mouse issues when viewing VM via VNC libvirt.input :type => "tablet", :bus => "usb" end end ``` ## PCI device passthrough You can specify multiple PCI devices to passthrough to the VM via `libvirt.pci`. Available options are listed below. Note that all options are required, except domain, which defaults to `0x0000`: * `domain` - The domain of the PCI device * `bus` - The bus of the PCI device * `slot` - The slot of the PCI device * `function` - The function of the PCI device You can extract that information from output of `lspci` command. First characters of each line are in format `[]:[]:[].[]`. For example: ```shell $ lspci| grep NVIDIA 0000:03:00.0 VGA compatible controller: NVIDIA Corporation GK110B [GeForce GTX TITAN Black] (rev a1) ``` In that case `domain` is `0x0000`, `bus` is `0x03`, `slot` is `0x00` and `function` is `0x0`. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.pci :domain => '0x0000', :bus => '0x06', :slot => '0x12', :function => '0x5' # Add another one if it is neccessary libvirt.pci :domain => '0x0000', :bus => '0x03', :slot => '0x00', :function => '0x0' end end ``` Note! Above options affect configuration only at domain creation. It won't change VM behaviour on `vagrant reload` after domain was created. Don't forget to [set](#domain-specific-options) `kvm_hidden` option to `true` especially if you are passthroughing NVIDIA GPUs. Otherwise GPU is visible from VM but cannot be operated. ## Using USB Devices There are several ways to pass a USB device through to a running instance: * Use `libvirt.usb` to [attach a USB device at boot](#usb-device-passthrough), with the device ID specified in the Vagrantfile * Use a client (such as `virt-viewer` or `virt-manager`) to attach the device at runtime [via USB redirectors](#usb-redirector-devices) * Use `virsh attach-device` once the VM is running (however, this is outside the scope of this readme) In all cases, if you wish to use a high-speed USB device, you will need to use `libvirt.usb_controller` to specify a USB2 or USB3 controller, as the default configuration only exposes a USB1.1 controller. ### USB Controller Configuration The USB controller can be configured using `libvirt.usb_controller`, with the following options: * `model` - The USB controller device model to emulate. (mandatory) * `ports` - The number of devices that can be connected to the controller. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Set up a USB3 controller libvirt.usb_controller :model => "qemu-xhci" end end ``` See the [libvirt documentation](https://libvirt.org/formatdomain.html#elementsControllers) for a list of valid models. 
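If you also need to size the controller, the optional `ports` setting listed above can be combined with the model. A minimal sketch (the port count here is arbitrary):

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    # USB3 controller with room for several passed-through devices
    libvirt.usb_controller :model => "qemu-xhci", :ports => 8
  end
end
```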
If any USB devices are passed through by setting `libvirt.usb` or `libvirt.redirdev`, a default controller will be added using the model `qemu-xhci` in the absence of a user specified one. This should help ensure more devices work out of the box as the default configured by libvirt is pii3-uhci, which appears to only work for USB 1 devices and does not work as expected when connected via a USB 2 controller, while the xhci stack should work for all versions of USB. ### USB Device Passthrough You can specify multiple USB devices to passthrough to the VM via `libvirt.usb`. The device can be specified by the following options: * `bus` - The USB bus ID, e.g. "1" * `device` - The USB device ID, e.g. "2" * `vendor` - The USB devices vendor ID (VID), e.g. "0x1234" * `product` - The USB devices product ID (PID), e.g. "0xabcd" At least one of these has to be specified, and `bus` and `device` may only be used together. The example values above match the device from the following output of `lsusb`: ``` Bus 001 Device 002: ID 1234:abcd Example device ``` ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # pass through specific device based on identifying it libvirt.usbdev :vendor => '0x1234', :product => '0xabcd' # pass through a host device where multiple of the same vendor/product exist libvirt.usbdev :bus => '1', :device => '1' end end ``` Additionally, the following options can be used: * `startupPolicy` - Is passed through to Libvirt and controls if the device has to exist. Libvirt currently allows the following values: "mandatory", "requisite", "optional". ### USB Redirector Devices You can specify multiple redirect devices via `libvirt.redirdev`. There are two types, `tcp` and `spicevmc` supported, for forwarding USB-devices to the guest. Available options are listed below. * `type` - The type of the USB redirector device. (`tcp` or `spicevmc`) * `host` - The host where the device is attached to. (mandatory for type `tcp`) * `port` - The port where the device is listening. (mandatory for type `tcp`) ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # add two devices using spicevmc channel (1..2).each do libvirt.redirdev :type => "spicevmc" end # add device, provided by localhost:4000 libvirt.redirdev :type => "tcp", :host => "localhost", :port => "4000" end end ``` Note that in order to enable USB redirection with Spice clients, you may need to also set `libvirt.graphics_type = "spice"` #### Filter for USB Redirector Devices You can define filter for redirected devices. These filters can be positiv or negative, by setting the mandatory option `allow=yes` or `allow=no`. All available options are listed below. Note the option `allow` is mandatory. * `class` - The device class of the USB device. A list of device classes is available on [Wikipedia](https://en.wikipedia.org/wiki/USB#Device_classes). * `vendor` - The vendor of the USB device. * `product` - The product id of the USB device. * `version` - The version of the USB device. Note that this is the version of `bcdDevice` * `allow` - allow or disallow redirecting this device. (mandatory) You can extract that information from output of `lsusb` command. Every line contains the information in format `Bus [] Device []: ID [:[]`. The `version` can be extracted from the detailed output of the device using `lsusb -D /dev/usb/[]/[]`. 
For example:

```shell
# get bcdDevice from
$: lsusb
Bus 001 Device 009: ID 08e6:3437 Gemalto (was Gemplus) GemPC Twin SmartCard Reader

$: lsusb -D /dev/bus/usb/001/009 | grep bcdDevice
  bcdDevice            2.00
```

In this case, the USB device with `class 0x0b`, `vendor 0x08e6`, `product 0x3437` and `bcdDevice version 2.00` is allowed to be redirected to the guest. All other devices will be refused.

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    libvirt.redirdev :type => "spicevmc"
    libvirt.redirfilter :class => "0x0b", :vendor => "0x08e6", :product => "0x3437", :version => "2.00", :allow => "yes"
    libvirt.redirfilter :allow => "no"
  end
end
```

## Serial Console Devices

You can define settings to redirect output from the serial console of any VM brought up with libvirt to a file or other devices that are listening. [See libvirt documentation](https://libvirt.org/formatdomain.html#elementCharSerial). Currently only redirecting to a file is supported.

* `type` - the only value that has an effect is `file`; in the future support may be added for virtual console, pty, dev, pipe, tcp, udp, unix socket, spiceport & nmdm.
* `source` - options pertaining to how the connection attaches to the host, contains sub-settings dependent on `type`.

`source` options for type `file`

* `path` - file on the host to which all serial port output is recorded. May be created by the qemu system user, causing some permissions issues.

```ruby
Vagrant.configure("2") do |config|
  config.vm.define :test do |test|
    test.vm.provider :libvirt do |domain|
      domain.serial :type => "file", :source => {:path => "/var/log/vm_consoles/test.log"}
    end
  end
end
```

## Random number generator passthrough

You can pass through `/dev/random` to your VM by configuring the domain like this:

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    # Pass through /dev/random from the host to the VM
    libvirt.random :model => 'random'
  end
end
```

At the moment only the `random` backend is supported.

## Watchdog device

A virtual hardware watchdog device can be added to the guest via the `libvirt.watchdog` element. The option `model` is mandatory and can have one of the following values.

* `i6300esb` - the recommended device, emulating a PCI Intel 6300ESB
* `ib700` - emulating an ISA iBase IB700
* `diag288` - emulating an S390 DIAG288 device

The optional `action` attribute describes what action to take when the watchdog expires. Valid values are specific to the underlying hypervisor. The default behavior is `reset`.

* `reset` - default, forcefully reset the guest
* `shutdown` - gracefully shut down the guest (not recommended)
* `poweroff` - forcefully power off the guest
* `pause` - pause the guest
* `none` - do nothing
* `dump` - automatically dump the guest
* `inject-nmi` - inject a non-maskable interrupt into the guest

```ruby
Vagrant.configure("2") do |config|
  config.vm.provider :libvirt do |libvirt|
    # Add Libvirt watchdog device model i6300esb
    libvirt.watchdog :model => 'i6300esb', :action => 'reset'
  end
end
```

## Smartcard device

A virtual smartcard device can be supplied to the guest via the `libvirt.smartcard` element. The option `mode` is mandatory and currently only the value `passthrough` is supported. The value `spicevmc` is the default for option `type` and can be omitted. When using `type = tcp`, the options `source_mode`, `source_host` and `source_service` are mandatory.
```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Add smartcard device with type 'spicevmc' libvirt.smartcard :mode => 'passthrough', :type => 'spicevmc' end end ``` ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Add smartcard device with type 'tcp' domain.smartcard :mode => 'passthrough', :type => 'tcp', :source_mode => 'bind', :source_host => '127.0.0.1', :source_service => '2001' end end ``` ## Hypervisor Features Hypervisor features can be specified via `libvirt.features` as a list. The default options that are enabled are `acpi`, `apic` and `pae`. If you define `libvirt.features` you overwrite the defaults, so keep that in mind. An example: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Specify the default hypervisor features libvirt.features = ['acpi', 'apic', 'pae' ] end end ``` A different example for ARM boards: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Specify the default hypervisor features libvirt.features = ["apic", "gic version='2'" ] end end ``` You can also specify a special set of features that help improve the behavior of guests running Microsoft Windows. You can specify HyperV features via `libvirt.hyperv_feature`. Available options are listed below. Note that both options are required: * `name` - The name of the feature Hypervisor feature (see Libvirt doc) * `state` - The state for this feature which can be either `on` or `off`. ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Relax constraints on timers libvirt.hyperv_feature :name => 'relaxed', :state => 'on' # Enable virtual APIC libvirt.hyperv_feature :name => 'vapic', :state => 'on' # Enable spinlocks (requires retries to be specified) libvirt.hyperv_feature :name => 'spinlocks', :state => 'on', :retries => '8191' end end ``` ## Clock Clock offset can be specified via `libvirt.clock_offset`. (Default is utc) Additionally timers can be specified via `libvirt.clock_timer`. Available options for timers are: name, track, tickpolicy, frequency, mode, present ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Set clock offset to localtime libvirt.clock_offset = 'localtime' # Timers ... libvirt.clock_timer :name => 'rtc', :tickpolicy => 'catchup' libvirt.clock_timer :name => 'pit', :tickpolicy => 'delay' libvirt.clock_timer :name => 'hpet', :present => 'no' libvirt.clock_timer :name => 'hypervclock', :present => 'yes' end end ``` ## CPU features You can specify CPU feature policies via `libvirt.cpu_feature`. Available options are listed below. Note that both options are required: * `name` - The name of the feature for the chosen CPU (see Libvirt's `cpu_map.xml`) * `policy` - The policy for this feature (one of `force`, `require`, `optional`, `disable` and `forbid` - see Libvirt documentation) ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # The feature will not be supported by virtual CPU. libvirt.cpu_feature :name => 'hypervisor', :policy => 'disable' # Guest creation will fail unless the feature is supported by host CPU. libvirt.cpu_feature :name => 'vmx', :policy => 'require' # The virtual CPU will claim the feature is supported regardless of it being supported by host CPU. libvirt.cpu_feature :name => 'pdpe1gb', :policy => 'force' end end ``` ## Memory Backing You can specify memoryBacking options via `libvirt.memorybacking`. 
Available options are shown below. Full documentation is available at the [libvirt _memoryBacking_ section](https://libvirt.org/formatdomain.html#elementsMemoryBacking). NOTE: The hugepages `` element is not yet supported ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.memorybacking :hugepages libvirt.memorybacking :nosharepages libvirt.memorybacking :locked libvirt.memorybacking :source, :type => 'file' libvirt.memorybacking :access, :mode => 'shared' libvirt.memorybacking :allocation, :mode => 'immediate' end end ``` ## No box and PXE boot There is support for PXE booting VMs with no disks as well as PXE booting VMs with blank disks. There are some limitations: * Requires Vagrant 1.6.0 or newer * No provisioning scripts are ran * No network configuration is being applied to the VM * No SSH connection can be made * `vagrant halt` will only work cleanly if the VM handles ACPI shutdown signals In short, VMs without a box can be created, halted and destroyed but all other functionality cannot be used. An example for a PXE booted VM with no disks whatsoever: ```ruby Vagrant.configure("2") do |config| config.vm.define :pxeclient do |pxeclient| pxeclient.vm.provider :libvirt do |domain| domain.boot 'network' end end end ``` And an example for a PXE booted VM with no box but a blank disk which will boot from this HD if the NICs fail to PXE boot: ```ruby Vagrant.configure("2") do |config| config.vm.define :pxeclient do |pxeclient| pxeclient.vm.provider :libvirt do |domain| domain.storage :file, :size => '100G', :type => 'qcow2' domain.boot 'network' domain.boot 'hd' end end end ``` Example for vm with 2 networks and only 1 is bootable and has dhcp server in this subnet, for example foreman with dhcp server Name of network "foreman_managed" is key for define boot order ```ruby config.vm.define :pxeclient do |pxeclient| pxeclient.vm.network :private_network,ip: '10.0.0.5', libvirt__network_name: "foreman_managed", libvirt__dhcp_enabled: false, libvirt__host_ip: '10.0.0.1' pxeclient.vm.provider :libvirt do |domain| domain.memory = 1000 boot_network = {'network' => 'foreman_managed'} domain.storage :file, :size => '100G', :type => 'qcow2' domain.boot boot_network domain.boot 'hd' end end ``` An example VM that is PXE booted from the `br1` device (which must already be configured in the host machine), and if that fails, is booted from the disk: ```ruby Vagrant.configure("2") do |config| config.vm.define :pxeclient do |pxeclient| pxeclient.vm.network :public_network, dev: 'br1', auto_config: false pxeclient.vm.provider :libvirt do |domain| boot_network = {'dev' => 'br1'} domain.storage :file, :size => '100G' domain.boot boot_network domain.boot 'hd' end end end ``` ## SSH Access To VM vagrant-libvirt supports vagrant's [standard ssh settings](https://docs.vagrantup.com/v2/vagrantfile/ssh_settings.html). ## Forwarded Ports vagrant-libvirt supports Forwarded Ports via ssh port forwarding. Please note that due to a well known limitation only the TCP protocol is supported. For each `forwarded_port` directive you specify in your Vagrantfile, vagrant-libvirt will maintain an active ssh process for the lifetime of the VM. If your VM should happen to be rebooted, the SSH session will need to be restablished by halting the VM and bringing it back up. vagrant-libvirt supports an additional `forwarded_port` option `gateway_ports` which defaults to `false`, but can be set to `true` if you want the forwarded port to be accessible from outside the Vagrant host. 
In this case you should also set the `host_ip` option to `'*'` since it defaults to `'localhost'`. You can also provide a custom adapter to forward from by 'adapter' option. Default is `eth0`. **Internally Accessible Port Forward** `config.vm.network :forwarded_port, guest: 80, host: 2000` **Externally Accessible Port Forward** `config.vm.network :forwarded_port, guest: 80, host: 2000, host_ip: "0.0.0.0"` ### Forwarding the ssh-port Vagrant-libvirt now supports forwarding the standard ssh-port on port 2222 from the localhost to allow for consistent provisioning steps/ports to be used when defining across multiple providers. To enable, set the following: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Enable forwarding of forwarded_port with id 'ssh'. libvirt.forward_ssh_port = true end end ``` Previously by default libvirt skipped the forwarding of the ssh-port because you can access the machine directly. In the future it is expected that this will be enabled by default once autocorrect support is added to handle port collisions for multi machine environments gracefully. ## Synced Folders Vagrant automatically syncs the project folder on the host to `/vagrant` in the guest. You can also configure additional synced folders. **SECURITY NOTE:** for remote Libvirt, nfs synced folders requires a bridged public network interface and you must connect to Libvirt via ssh. **NFS** `vagrant-libvirt` supports [NFS](https://www.vagrantup.com/docs/synced-folders/nfs) as default with bidirectional synced folders. Example with NFS: ``` ruby Vagrant.configure("2") do |config| config.vm.synced_folder "./", "/vagrant" end ``` **RSync** `vagrant-libvirt` supports [rsync](https://www.vagrantup.com/docs/synced-folders/rsync) with unidirectional synced folders. Example with rsync: ``` ruby Vagrant.configure("2") do |config| config.vm.synced_folder "./", "/vagrant", type: "rsync" end ``` **9P** `vagrant-libvirt` supports [VirtFS](http://www.linux-kvm.org/page/VirtFS) ([9p or Plan 9](https://en.wikipedia.org/wiki/9P_\(protocol\))) with bidirectional synced folders. Difference between NFS and 9p is explained [here](https://unix.stackexchange.com/questions/240281/virtfs-plan-9-vs-nfs-as-tool-for-share-folder-for-virtual-machine). For 9p shares, a `mount: false` option allows to define synced folders without mounting them at boot. Example for `accessmode: "squash"` with 9p: ``` ruby Vagrant.configure("2") do |config| config.vm.synced_folder "./", "/vagrant", type: "9p", disabled: false, accessmode: "squash", owner: "1000" end ``` Example for `accessmode: "mapped"` with 9p: ``` ruby Vagrant.configure("2") do |config| config.vm.synced_folder "./", "/vagrant", type: "9p", disabled: false, accessmode: "mapped", mount: false end ``` Further documentation on using 9p can be found in [kernel docs](https://www.kernel.org/doc/Documentation/filesystems/9p.txt) and in [QEMU wiki](https://wiki.qemu.org/Documentation/9psetup#Starting_the_Guest_directly). Please do note that 9p depends on support in the guest and not all distros come with the 9p module by default. **Virtio-fs** `vagrant-libvirt` supports [Virtio-fs](https://virtio-fs.gitlab.io/) with bidirectional synced folders. For virtiofs shares, a `mount: false` option allows to define synced folders without mounting them at boot. So far, passthrough is the only supported access mode and it requires running the virtiofsd daemon as root. QEMU needs to allocate the backing memory for all the guest RAM as shared memory, e.g. 
[Use file-backed memory](https://libvirt.org/kbase/virtiofs.html#host-setup) by enable `memory_backing_dir` option in `/etc/libvirt/qemu.conf`: ``` shell memory_backing_dir = "/dev/shm" ``` Example for Libvirt \>= 6.2.0 (e.g. Ubuntu 20.10 with Linux 5.8.0 + QEMU 5.0 + Libvirt 6.6.0, i.e. NUMA nodes required) with virtiofs: ``` ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.cpus = 2 libvirt.numa_nodes = [{ :cpus => "0-1", :memory => 8192, :memAccess => "shared" }] libvirt.memorybacking :access, :mode => "shared" end config.vm.synced_folder "./", "/vagrant", type: "virtiofs" end ``` Example for Libvirt \>= 6.9.0 (e.g. Ubuntu 21.04 with Linux 5.11.0 + QEMU 5.2 + Libvirt 7.0.0, or Ubuntu 20.04 + [PPA enabled](https://launchpad.net/~savoury1/+archive/ubuntu/virtualisation)) with virtiofs: ``` ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.cpus = 2 libvirt.memory = 8192 libvirt.memorybacking :access, :mode => "shared" end config.vm.synced_folder "./", "/vagrant", type: "virtiofs" end ``` Further documentation on using virtiofs can be found in [official HowTo](https://virtio-fs.gitlab.io/index.html#howto) and in [Libvirt KB](https://libvirt.org/kbase/virtiofs.html). Please do note that virtiofs depends on: - Host: Linux \>= 5.4, QEMU \>= 4.2 and Libvirt \>= 6.2 (e.g. Ubuntu 20.10) - Guest: Linux \>= 5.4 (e.g. Ubuntu 20.04) ## QEMU Session Support vagrant-libvirt supports using QEMU user sessions to maintain Vagrant VMs. As the session connection does not have root access to the system features which require root will not work. Access to networks created by the system QEMU connection can be granted by using the [QEMU bridge helper](https://wiki.qemu.org/Features/HelperNetworking). The bridge helper is enabled by default on some distros but may need to be enabled/installed on others. There must be a virbr network defined in the QEMU system session. The libvirt `default` network which comes by default, the vagrant `vagrant-libvirt` network which is generated if you run a Vagrantfile using the System session, or a manually defined network can be used. These networks can be set to autostart with `sudo virsh net-autostart `, which'll mean no further root access is required even after reboots. The QEMU bridge helper is configured via `/etc/qemu/bridge.conf`. This file must include the virbr you wish to use (e.g. virbr0, virbr1, etc). You can find this out via `sudo virsh net-dumpxml `. 
``` allow virbr0 ``` An example configuration of a machine using the QEMU session connection: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| # Use QEMU session instead of system connection libvirt.qemu_use_session = true # URI of QEMU session connection, default is as below libvirt.uri = 'qemu:///session' # URI of QEMU system connection, use to obtain IP address for management, default is below libvirt.system_uri = 'qemu:///system' # Path to store Libvirt images for the virtual machine, default is as ~/.local/share/libvirt/images libvirt.storage_pool_path = '/home/user/.local/share/libvirt/images' # Management network device, default is below libvirt.management_network_device = 'virbr0' end # Public network configuration using existing network device # Note: Private networks do not work with QEMU session enabled as root access is required to create new network devices config.vm.network :public_network, :dev => "virbr1", :mode => "bridge", :type => "bridge" end ``` ## Customized Graphics vagrant-libvirt supports customizing the display and video settings of the managed guest. This is probably most useful for VNC-type displays with multiple guests. It lets you specify the exact port for each guest to use deterministically. Here is an example of using custom display options: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.graphics_port = 5901 libvirt.graphics_ip = '0.0.0.0' libvirt.video_type = 'qxl' end end ``` ## TPM Devices Modern versions of Libvirt support connecting to TPM devices on the host system. This allows you to enable Trusted Boot Extensions, among other features, on your guest VMs. To passthrough a hardware TPM, you will generally only need to modify the `tpm_path` variable in your guest configuration. However, advanced usage, such as the application of a Software TPM, may require modifying the `tpm_model`, `tpm_type` and `tpm_version` variables. The TPM options will only be used if you specify a TPM path or version. Declarations of any TPM options without specifying a path or version will result in those options being ignored. Here is an example of using the TPM options: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.tpm_model = 'tpm-tis' libvirt.tpm_type = 'passthrough' libvirt.tpm_path = '/dev/tpm0' end end ``` It's also possible for Libvirt to start an emulated TPM device on the host. Requires `swtpm` and `swtpm-tools` ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.tpm_model = "tpm-crb" libvirt.tpm_type = "emulator" libvirt.tpm_version = "2.0" end end ``` ## Memory balloon The configuration of the memory balloon device can be overridden. By default, libvirt will automatically attach a memory balloon; this behavior is preserved by not configuring any memballoon-related options. The memory balloon can be explicitly disabled by setting `memballoon_enabled` to `false`. Setting `memballoon_enabled` to `true` will allow additional configuration of memballoon-related options. 
Here is an example of using the memballoon options: ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.memballoon_enabled = true libvirt.memballoon_model = 'virtio' libvirt.memballoon_pci_bus = '0x00' libvirt.memballoon_pci_slot = '0x0f' end end ``` ## Libvirt communication channels For certain functionality to be available within a guest, a private communication channel must be established with the host. Two notable examples of this are the QEMU guest agent, and the Spice/QXL graphics type. Below is a simple example which exposes a virtio serial channel to the guest. Note: in a multi-VM environment, the channel would be created for all VMs. ```ruby vagrant.configure(2) do |config| config.vm.provider :libvirt do |libvirt| libvirt.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' end end ``` Below is the syntax for creating a spicevmc channel for use by a qxl graphics card. ```ruby vagrant.configure(2) do |config| config.vm.provider :libvirt do |libvirt| libvirt.channel :type => 'spicevmc', :target_name => 'com.redhat.spice.0', :target_type => 'virtio' end end ``` These settings can be specified on a per-VM basis, however the per-guest settings will OVERRIDE any global 'config' setting. In the following example, we create 3 VMs with the following configuration: * **master**: No channel settings specified, so we default to the provider setting of a single virtio guest agent channel. * **node1**: Override the channel setting, setting both the guest agent channel, and a spicevmc channel * **node2**: Override the channel setting, setting both the guest agent channel, and a 'guestfwd' channel. TCP traffic sent by the guest to the given IP address and port is forwarded to the host socket `/tmp/foo`. Note: this device must be unique for each VM. For example: ```ruby Vagrant.configure(2) do |config| config.vm.box = "fedora/32-cloud-base" config.vm.provider :libvirt do |libvirt| libvirt.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' end config.vm.define "master" do |master| master.vm.provider :libvirt do |domain| domain.memory = 1024 end end config.vm.define "node1" do |node1| node1.vm.provider :libvirt do |domain| domain.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' domain.channel :type => 'spicevmc', :target_name => 'com.redhat.spice.0', :target_type => 'virtio' end end config.vm.define "node2" do |node2| node2.vm.provider :libvirt do |domain| domain.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' domain.channel :type => 'unix', :target_type => 'guestfwd', :target_address => '192.0.2.42', :target_port => '4242', :source_path => '/tmp/foo' end end end ``` ## Custom command line arguments and environment variables You can also specify multiple qemuargs arguments or qemuenv environment variables for qemu-system * `value` - Value ```ruby Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.qemuargs :value => "-device" libvirt.qemuargs :value => "intel-iommu" libvirt.qemuenv QEMU_AUDIO_DRV: 'pa' libvirt.qemuenv QEMU_AUDIO_TIMER_PERIOD: '150' libvirt.qemuenv QEMU_PA_SAMPLES: '1024', QEMU_PA_SERVER: '/run/user/1000/pulse/native' end end ``` ## Box Formats ### Version 1 This is the original format that most boxes currently use. 
You can view an example box in the [`example_box/` directory](https://github.com/vagrant-libvirt/vagrant-libvirt/tree/master/example_box). That directory also contains instructions on how to build a box. The box is a tarball containing:

* qcow2 image file named `box.img`
* `metadata.json` file describing the box image (`provider`, `virtual_size`, `format`)
* `Vagrantfile` that sets defaults for the provider-specific configuration for this provider

### Version 2 (Experimental)

Due to the limitation of only being able to handle a single disk with the version 1 format, a new format was added to support boxes that need to specify multiple disks. This is still currently experimental and as such support for packaging has yet to be added. There is a script in the tools folder (tools/create_box_with_two_disks.sh) that should provide a guideline on how to create such a box for those that wish to experiment and provide early feedback.

At its most basic, it expects an array of disks to allow a specific order to be presented. Disks will be attached in this order, and device names within the VM are assumed based on it. The `path` attribute is required and is expected to be relative to the base of the box. This should allow placing the disk images within a nested directory within the box if that is useful for those with a larger number of disks. The `name` attribute allows overriding the target volume name that will be used in the libvirt storage pool. Note that vagrant-libvirt will still prefix the volume name with `#{box_name}_vagrant_box_image_#{box_version}_` to avoid accidental clashes with other boxes.

Format and virtual size no longer need to be specified, as they are now retrieved directly from the provided image using `qemu-img info ...`.

Example format:

```json
{
  "disks": [
    { "path": "disk1.img" },
    { "path": "disk2.img", "name": "secondary_disk" },
    { "path": "disk3.img" }
  ],
  "provider": "libvirt"
}
```

## Create Box

If creating a box from a modified vagrant-libvirt machine, ensure that you have set `config.ssh.insert_key = false` in the original Vagrantfile, as otherwise Vagrant will replace the default connection key-pair that is required on first boot with one specific to the machine and prevent the default key from working on the exported result.

```ruby
Vagrant.configure("2") do |config|
  # this setting is only recommended if planning to export the
  # resulting machine
  config.ssh.insert_key = false

  config.vm.define :test_vm do |test_vm|
    test_vm.vm.box = "fedora/32-cloud-base"
  end
end
```

To create a vagrant-libvirt box from a qcow2 image, run `create_box.sh` (located in the tools directory):

```shell
$ create_box.sh ubuntu14.qcow2
```

You can also create a box by using [Packer](https://packer.io). Packer templates for use with vagrant-libvirt are available at https://github.com/jakobadam/packer-qemu-templates. After cloning that project you can build a vagrant-libvirt box by running:

```shell
$ cd packer-qemu-templates
$ packer build ubuntu-14.04-server-amd64-vagrant.json
```

## Package Box from VM

vagrant-libvirt has native support for [`vagrant package`](https://www.vagrantup.com/docs/cli/package.html) via libguestfs [virt-sysprep](http://libguestfs.org/virt-sysprep.1.html). virt-sysprep operations can be customized via the `VAGRANT_LIBVIRT_VIRT_SYSPREP_OPERATIONS` environment variable; see the [upstream documentation](http://libguestfs.org/virt-sysprep.1.html#operations) for further details, especially on the default sysprep operations enabled for your system.
Options to the virt-sysprep command call can be passed via `VAGRANT_LIBVIRT_VIRT_SYSPREP_OPTIONS` environment variable. ```shell $ export VAGRANT_LIBVIRT_VIRT_SYSPREP_OPTIONS="--delete /etc/hostname" $ vagrant package ``` For example, on Chef [bento](https://github.com/chef/bento) VMs that require SSH hostkeys already set (e.g. bento/debian-7) as well as leave existing LVM UUIDs untouched (e.g. bento/ubuntu-18.04), these can be packaged into vagrant-libvirt boxes like so: ```shell $ export VAGRANT_LIBVIRT_VIRT_SYSPREP_OPERATIONS="defaults,-ssh-userdir,-ssh-hostkeys,-lvm-uuids" $ vagrant package ``` ## Troubleshooting VMs The first step for troubleshooting a VM image that appears to not boot correctly, or hangs waiting to get an IP, is to check it with a VNC viewer. A key thing to remember is that if the VM doesn't get an IP, then vagrant can't communicate with it to configure anything, so a problem at this stage is likely to come from the VM, but we'll outline the tools and common problems to help you troubleshoot that. By default, when you create a new VM, a vnc server will listen on `127.0.0.1` on port `TCP5900`. If you connect with a vnc viewer you can see the boot process. If your VM isn't listening on `5900` by default, you can use `virsh dumpxml` to find out which port it's listening on, or can configure it with `graphics_port` and `graphics_ip` (see 'Domain Specific Options' above). Note: Connecting with the console (`virsh console`) requires additional config, so some VMs may not show anything on the console at all, instead displaying it in the VNC console. The issue with the text console is that you also need to build the image used to tell the kernel to output to the console during boot, and typically most do not have this built in. Problems we've seen in the past include: - Forgetting to remove `/etc/udev/rules.d/70-persistent-net.rules` before packaging the VM - VMs expecting a specific disk device to be connected If you're still confused, check the Github Issues for this repo for anything that looks similar to your problem. [Github Issue #1032](https://github.com/vagrant-libvirt/vagrant-libvirt/issues/1032) contains some historical troubleshooting for VMs that appeared to hang. Did you hit a problem that you'd like to note here to save time in the future? Please do! ## Development To work on the `vagrant-libvirt` plugin, clone this repository out, and use [Bundler](http://gembundler.com) to get the dependencies: ```shell $ git clone https://github.com/vagrant-libvirt/vagrant-libvirt.git $ cd vagrant-libvirt $ bundle install ``` Once you have the dependencies, verify the unit tests pass with `rspec`: ```shell $ export VAGRANT_HOME=$(mktemp -d) $ bundle exec rspec --fail-fast --color --format documentation ``` If those pass, you're ready to start developing the plugin. Setting `VAGRANT_HOME` is to avoid issues with conflicting with other plugins/gems or data already present under `~/.vagrant.d`. Additionally if you wish to test against a specific version of vagrant you can control the version using the following before running the tests: ```shell $ export VAGRANT_VERSION=v2.2.14 ``` **Note** rvm is used by the maintainers to help provide an environment to test against multiple ruby versions that align with the ones used by vagrant for their embedded ruby depending on the release. You can see what version is used by looking at the current [unit tests](.github/workflows/unit-tests.yml) workflow. 
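Putting the steps above together, a complete test run against a pinned Vagrant version might look like the following; the version shown is only an example:

```shell
$ git clone https://github.com/vagrant-libvirt/vagrant-libvirt.git
$ cd vagrant-libvirt
$ export VAGRANT_HOME=$(mktemp -d)
$ export VAGRANT_VERSION=v2.2.14
$ bundle install
$ bundle exec rspec --fail-fast --color --format documentation
```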
You can test the plugin without installing it into your Vagrant environment by just creating a `Vagrantfile` in the top level of this directory (it is gitignored) that uses it. You can add the following line to your Vagrantfile while in development to ensure vagrant checks that the plugin is installed: ```ruby Vagrant.configure("2") do |config| config.vagrant.plugins = "vagrant-libvirt" end ``` Or add the following to the top of the file to ensure that any required plugins are installed globally: ```ruby REQUIRED_PLUGINS = %w(vagrant-libvirt) exit unless REQUIRED_PLUGINS.all? do |plugin| Vagrant.has_plugin?(plugin) || ( puts "The #{plugin} plugin is required. Please install it with:" puts "$ vagrant plugin install #{plugin}" false ) end ``` Now you can use bundler to execute Vagrant: ```shell $ bundle exec vagrant up --provider=libvirt ``` **IMPORTANT NOTE:** bundle is crucial. You need to use bundled Vagrant. ## Contributing 1. Fork it 2. Create your feature branch (`git checkout -b my-new-feature`) 3. Commit your changes (`git commit -am 'Add some feature'`) 4. Push to the branch (`git push origin my-new-feature`) 5. Create new Pull Request vagrant-libvirt-0.7.0/Rakefile000066400000000000000000000011071414232526500163010ustar00rootroot00000000000000# frozen_string_literal: true #require 'rubygems' #require 'bundler/setup' require 'bundler/gem_tasks' require File.expand_path('../lib/vagrant-libvirt/version', __FILE__) Bundler::GemHelper.install_tasks task default: [:deftask] task :deftask do puts 'call rake -T' end task :write_version do VagrantPlugins::ProviderLibvirt.write_version() end task :clean_version do rm_rf File.expand_path('../lib/vagrant-libvirt/version', __FILE__) end task "clean" => :clean_version task :write_version => :clean_version task "build" => :write_version task "release" => :write_version vagrant-libvirt-0.7.0/entrypoint.sh000077500000000000000000000101051414232526500174040ustar00rootroot00000000000000#!/bin/bash set -u -o pipefail vdir="/.vagrant.d" if [[ ! -d ${vdir} ]] then echo "Require the user ~/.vagrant.d to be bind mounted at ${vdir}" echo echo "Typically use '-v ~/.vagrant.d:${vdir}' with the docker run command." exit 2 fi vdir_mnt=$(stat -c %m ${vdir}) case "${vdir_mnt%%/}" in /*) # user mounted vagrant home is not mounted on /, so # presumably it is a mount bind or mounted volume and should # be able to persist boxes and machine index. # ;; *) echo -n "${vdir} is not set to a bind mounted volume, may not be able " echo -n "to persist the machine index which may result in some unexpected " echo "behaviour." ;; esac # To determine default user to use search for the Vagrantfile starting with # the current working directory. If it can't be found, use the owner/group # from the current working directory anyway vagrantfile="${VAGRANT_VAGRANTFILE:-Vagrantfile}" path="$(pwd)" while [[ "$path" != "" && ! -e "$path/$vagrantfile" ]] do path=${path%/*} done if [[ "$path" == "" ]] then path="$(pwd)" fi USER_UID=${USER_UID:-$(stat -c %u ${path})} || exit 3 USER_GID=${USER_GID:-$(stat -c %g ${path})} || exit 3 if [[ ${USER_UID} -eq 0 ]] then if [[ -z "${IGNORE_RUN_AS_ROOT:-}" ]] then echo "WARNING! 
Running as root, if this breaks, you get to keep both pieces" fi else vdir_uid=$(stat -c %u ${vdir}) if [[ "${vdir_uid}" != "${USER_UID}" ]] then if [[ -z "$(ls -A ${vdir})" ]] then # vdir has just been created and is owned by the wrong user # modify the ownership to allow the required directories to # be created chown ${USER_UID}:${USER_GID} ${vdir} else echo -n "ERROR: Attempting to use a directory on ${vdir} that is not " echo -n "owned by the user that owns ${path}/${vagrantfile} is not " echo "supported!" exit 2 fi fi fi export USER=vagrant export GROUP=users export HOME=/home/${USER} echo "Starting with UID: ${USER_UID}, GID: ${USER_GID}" if [[ "${USER_GID}" != "0" ]] then if getent group ${GROUP} > /dev/null then GROUPCMD=groupmod else GROUPCMD=groupadd fi ${GROUPCMD} -g ${USER_GID} ${GROUP} >/dev/null || exit 3 fi if [[ "${USER_UID}" != "0" ]] then if getent passwd ${USER} > /dev/null then USERCMD=usermod else USERCMD=useradd fi ${USERCMD} --shell /bin/bash -u ${USER_UID} -g ${USER_GID} -o -c "" -m ${USER} >/dev/null 2>&1 || exit 3 fi # Perform switching in of boxes, data directory containing machine index # and temporary directory from the user mounted environment for dir in boxes data tmp do # if the directory hasn't been explicitly mounted over, remove it. if [[ -e "/vagrant/${dir}/.remove" ]] then rm -rf /vagrant/${dir} [[ ! -e ${vdir}/${dir} ]] && gosu ${USER} mkdir ${vdir}/${dir} ln -s ${vdir}/${dir} /vagrant/${dir} fi done # make sure the directories can be written to by vagrant otherwise will # get a start up error find ${VAGRANT_HOME} -maxdepth 1 ! -exec chown -h ${USER}:${GROUP} {} \+ LIBVIRT_SOCK=/var/run/libvirt/libvirt-sock if [[ ! -S ${LIBVIRT_SOCK} ]] then if [[ -z "${IGNORE_MISSING_LIBVIRT_SOCK:-}" ]] then echo "Unless you are using this to connect to a remote libvirtd it is" echo "necessary to mount the libvirt socket in as ${LIBVIRT_SOCK}" echo echo "Set IGNORE_MISSING_LIBVIRT_SOCK to silence this warning" fi else LIBVIRT_GID=$(stat -c %g ${LIBVIRT_SOCK}) # only do this if the host uses a non-root group for libvirt if [[ ${LIBVIRT_GID} -ne 0 ]] then if getent group libvirt >/dev/null then GROUPCMD=groupmod else GROUPCMD=groupadd fi ${GROUPCMD} -g ${LIBVIRT_GID} libvirt >/dev/null || exit 3 usermod -a -G libvirt ${USER} || exit 3 fi fi if [[ $# -eq 0 ]] then # if no command provided exec gosu ${USER} vagrant help fi exec gosu ${USER} "$@" vagrant-libvirt-0.7.0/example_box/000077500000000000000000000000001414232526500171405ustar00rootroot00000000000000vagrant-libvirt-0.7.0/example_box/README.md000066400000000000000000000020441414232526500204170ustar00rootroot00000000000000# Vagrant Libvirt Example Box Vagrant providers each require a custom provider-specific box format. This folder shows the example contents of a box for the `libvirt` provider. To turn this into a box create a Vagrant image according documentation (don't forget to install rsync command) and create box with following command: ``` $ tar cvzf custom_box.box ./metadata.json ./Vagrantfile ./box.img ``` This box works by using Vagrant's built-in Vagrantfile merging to setup defaults for Libvirt. These defaults can easily be overwritten by higher-level Vagrantfiles (such as project root Vagrantfiles). ## Box Metadata Libvirt box should define at least three data fields in `metadata.json` file. * provider - Provider name is libvirt. * format - Currently supported format is qcow2. * virtual_size - Virtual size of image in GBytes. 
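A minimal `metadata.json` covering these fields, matching the one shipped in this directory, looks like:

```json
{
  "provider"     : "libvirt",
  "format"       : "qcow2",
  "virtual_size" : 16
}
```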
## Converting Boxes Instead of creating a box from scratch, you can use [vagrant-mutate](https://github.com/sciurus/vagrant-mutate) to take boxes created for other Vagrant providers and use them with vagrant-libvirt. vagrant-libvirt-0.7.0/example_box/Vagrantfile000066400000000000000000000034131414232526500213260ustar00rootroot00000000000000# frozen_string_literal: true # -*- mode: ruby -*- # vi: set ft=ruby : Vagrant.configure("2") do |config| # Example configuration of new VM.. # #config.vm.define :test_vm do |test_vm| # Box name # #test_vm.vm.box = "centos64" # Domain Specific Options # # See README for more info. # #test_vm.vm.provider :libvirt do |domain| # domain.memory = 2048 # domain.cpus = 2 #end # Interfaces for VM # # Networking features in the form of `config.vm.network` # #test_vm.vm.network :private_network, :ip => '10.20.30.40' #test_vm.vm.network :public_network, :ip => '10.20.30.41' #end # Options for Libvirt Vagrant provider. config.vm.provider :libvirt do |libvirt| # A hypervisor name to access. Different drivers can be specified, but # this version of provider creates KVM machines only. Some examples of # drivers are KVM (QEMU hardware accelerated), QEMU (QEMU emulated), # Xen (Xen hypervisor), lxc (Linux Containers), # esx (VMware ESX), vmwarews (VMware Workstation) and more. Refer to # documentation for available drivers (http://libvirt.org/drivers.html). libvirt.driver = "kvm" # The name of the server, where Libvirtd is running. # libvirt.host = "localhost" # If use ssh tunnel to connect to Libvirt. libvirt.connect_via_ssh = false # The username and password to access Libvirt. Password is not used when # connecting via ssh. libvirt.username = "root" #libvirt.password = "secret" # Libvirt storage pool name, where box image and instance snapshots will # be stored. libvirt.storage_pool_name = "default" # Set a prefix for the machines that's different than the project dir name. #libvirt.default_prefix = '' end end vagrant-libvirt-0.7.0/example_box/metadata.json000066400000000000000000000001251414232526500216110ustar00rootroot00000000000000{ "provider" : "libvirt", "format" : "qcow2", "virtual_size" : 16 } vagrant-libvirt-0.7.0/lib/000077500000000000000000000000001414232526500154035ustar00rootroot00000000000000vagrant-libvirt-0.7.0/lib/vagrant-libvirt.rb000066400000000000000000000015251414232526500210460ustar00rootroot00000000000000# frozen_string_literal: true require 'pathname' module VagrantPlugins module ProviderLibvirt lib_path = Pathname.new(File.expand_path('../vagrant-libvirt', __FILE__)) autoload :Action, lib_path.join('action') autoload :Errors, lib_path.join('errors') autoload :Util, lib_path.join('util') def self.source_root @source_root ||= Pathname.new(File.expand_path('../../', __FILE__)) end end end begin require 'vagrant' rescue LoadError raise 'The Vagrant Libvirt plugin must be run within Vagrant.' end # This is a sanity check to make sure no one is attempting to install # this into an early Vagrant version. if Vagrant::VERSION < '1.5.0' raise 'The Vagrant Libvirt plugin is only compatible with Vagrant 1.5+.' 
end # make sure base module class defined before loading plugin require 'vagrant-libvirt/plugin' vagrant-libvirt-0.7.0/lib/vagrant-libvirt/000077500000000000000000000000001414232526500205165ustar00rootroot00000000000000vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action.rb000066400000000000000000000327771414232526500223400ustar00rootroot00000000000000# frozen_string_literal: true require 'vagrant/action/builder' require 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Include the built-in & general modules so we can use them as top-level things. include Vagrant::Action::Builtin include Vagrant::Action::General @logger = Log4r::Logger.new('vagrant_libvirt::action') # remove image from Libvirt storage pool def self.remove_libvirt_image Vagrant::Action::Builder.new.tap do |b| b.use RemoveLibvirtImage end end # This action is called to bring the box up from nothing. def self.action_up Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use BoxCheckOutdated b.use Call, IsCreated do |env, b2| # Create VM if not yet created. if !env[:result] b2.use SetNameOfDomain if !env[:machine].config.vm.box b2.use CreateDomain b2.use CreateNetworks b2.use CreateNetworkInterfaces b2.use SetBootOrder b2.use StartDomain else b2.use HandleStoragePool b2.use HandleBox b2.use HandleBoxImage b2.use CreateDomainVolume b2.use CreateDomain b2.use Provision b2.use PrepareNFSValidIds b2.use SyncedFolderCleanup b2.use SyncedFolders b2.use PrepareNFSSettings b2.use ShareFolders b2.use CreateNetworks b2.use CreateNetworkInterfaces b2.use SetBootOrder b2.use StartDomain b2.use WaitTillUp b2.use WaitForCommunicator, [:running] b2.use ForwardPorts b2.use SetHostname # b2.use SyncFolders end else env[:halt_on_error] = true b2.use action_start end end end end # Assuming VM is created, just start it. This action is not called # directly by any subcommand. VM can be suspended, already running or in # poweroff state. def self.action_start Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsRunning do |env, b2| # If the VM is running, run the necessary provisioners if env[:result] b2.use action_provision next end b2.use Call, IsSuspended do |env2, b3| # if vm is suspended resume it then exit if env2[:result] b3.use CreateNetworks b3.use ResumeDomain next end if !env[:machine].config.vm.box # With no box, we just care about network creation and starting it b3.use CreateNetworks b3.use SetBootOrder b3.use StartDomain else # VM is not running or suspended. b3.use Provision # Ensure networks are created and active b3.use CreateNetworks b3.use SetBootOrder b3.use PrepareNFSValidIds b3.use SyncedFolderCleanup b3.use SyncedFolders # Start it.. b3.use StartDomain # Machine should gain IP address when comming up, # so wait for dhcp lease and store IP into machines data_dir. b3.use WaitTillUp b3.use WaitForCommunicator, [:running] b3.use ForwardPorts b3.use PrepareNFSSettings b3.use ShareFolders end end end end end # This is the action that is primarily responsible for halting the # virtual machine. 
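      # The chain below first clears any ssh port forwards, resumes the
      # domain if it is suspended, and then, for a running domain, starts the
      # shutdown timer and tries a graceful shutdown (GracefulHalt followed
      # by ShutdownDomain) before falling back to a forced poweroff
      # (HaltDomain).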
def self.action_halt Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use ClearForwardedPorts b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsSuspended do |env2, b3| b3.use CreateNetworks if env2[:result] b3.use ResumeDomain if env2[:result] end # only perform shutdown if VM is running b2.use Call, IsRunning do |env2, b3| next unless env2[:result] b3.use StartShutdownTimer b3.use Call, GracefulHalt, :shutoff, :running do |env3, b4| if !env3[:result] b4.use Call, ShutdownDomain, :shutoff, :running do |env4, b5| if !env4[:result] b5.use HaltDomain end end end end end end end end # This is the action implements the reload command # It uses the halt and start actions def self.action_reload Vagrant::Action::Builder.new.tap do |b| b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use ConfigValidate b2.use action_halt b2.use action_start end end end # not implemented and looks like not require def self.action_package Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use PackageSetupFolders b2.use PackageSetupFiles b2.use action_halt b2.use Package b2.use PackageDomain end end end # This is the action that is primarily responsible for completely # freeing the resources of the underlying virtual machine. def self.action_destroy Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] # Try to remove stale volumes anyway b2.use SetNameOfDomain b2.use RemoveStaleVolume if env[:machine].config.vm.box b2.use CleanMachineFolder, quiet: true b2.use MessageNotCreated unless env[:result] next end b2.use Call, DestroyConfirm do |env2, b3| if env2[:result] b3.use ProvisionerCleanup, :before b3.use ClearForwardedPorts b3.use PruneNFSExports b3.use DestroyDomain b3.use DestroyNetworks b3.use CleanMachineFolder else b3.use MessageWillNotDestroy end end end end end # This action is called to SSH into the machine. def self.action_ssh Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] raise Vagrant::Errors::VMNotCreatedError end b2.use Call, IsRunning do |env2, b3| unless env2[:result] raise Vagrant::Errors::VMNotRunningError end b3.use SSHExec end end end end # This action is called when `vagrant provision` is called. def self.action_provision Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsRunning do |env2, b3| unless env2[:result] b3.use MessageNotRunning next end b3.use Provision # b3.use SyncFolders end end end end # This is the action that is primarily responsible for suspending # the virtual machine. def self.action_suspend Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsRunning do |env2, b3| unless env2[:result] b3.use MessageNotRunning next end b3.use SuspendDomain end end end end # This is the action that is primarily responsible for resuming # suspended machines. 
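      # Resuming recreates and activates the needed networks (CreateNetworks)
      # before resuming the suspended domain itself (ResumeDomain); a machine
      # that is not created or not suspended only gets an informational
      # message.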
def self.action_resume Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] b2.use MessageNotCreated next end b2.use Call, IsSuspended do |env2, b3| unless env2[:result] b3.use MessageNotSuspended next end b3.use CreateNetworks b3.use ResumeDomain end end end end def self.action_read_mac_addresses Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use ReadMacAddresses end end # This is the action that will run a single SSH command. def self.action_ssh_run Vagrant::Action::Builder.new.tap do |b| b.use ConfigValidate b.use Call, IsCreated do |env, b2| unless env[:result] raise Vagrant::Errors::VMNotCreatedError end b2.use Call, IsRunning do |env2, b3| unless env2[:result] raise Vagrant::Errors::VMNotRunningError end b3.use SSHRun end end end end action_root = Pathname.new(File.expand_path('../action', __FILE__)) autoload :PackageDomain, action_root.join('package_domain') autoload :CreateDomain, action_root.join('create_domain') autoload :CreateDomainVolume, action_root.join('create_domain_volume') autoload :CreateNetworkInterfaces, action_root.join('create_network_interfaces') autoload :CreateNetworks, action_root.join('create_networks') autoload :CleanMachineFolder, action_root.join('clean_machine_folder') autoload :DestroyDomain, action_root.join('destroy_domain') autoload :DestroyNetworks, action_root.join('destroy_networks') autoload :ForwardPorts, action_root.join('forward_ports') autoload :ClearForwardedPorts, action_root.join('forward_ports') autoload :HaltDomain, action_root.join('halt_domain') autoload :StartShutdownTimer, action_root.join('shutdown_domain') autoload :ShutdownDomain, action_root.join('shutdown_domain') autoload :HandleBoxImage, action_root.join('handle_box_image') autoload :HandleStoragePool, action_root.join('handle_storage_pool') autoload :RemoveLibvirtImage, action_root.join('remove_libvirt_image') autoload :IsCreated, action_root.join('is_created') autoload :IsRunning, action_root.join('is_running') autoload :IsSuspended, action_root.join('is_suspended') autoload :MessageAlreadyCreated, action_root.join('message_already_created') autoload :MessageNotCreated, action_root.join('message_not_created') autoload :MessageNotRunning, action_root.join('message_not_running') autoload :MessageNotSuspended, action_root.join('message_not_suspended') autoload :MessageWillNotDestroy, action_root.join('message_will_not_destroy') autoload :RemoveStaleVolume, action_root.join('remove_stale_volume') autoload :PrepareNFSSettings, action_root.join('prepare_nfs_settings') autoload :PrepareNFSValidIds, action_root.join('prepare_nfs_valid_ids') autoload :PruneNFSExports, action_root.join('prune_nfs_exports') autoload :ReadMacAddresses, action_root.join('read_mac_addresses') autoload :ResumeDomain, action_root.join('resume_domain') autoload :SetNameOfDomain, action_root.join('set_name_of_domain') autoload :SetBootOrder, action_root.join('set_boot_order') # I don't think we need it anymore autoload :ShareFolders, action_root.join('share_folders') autoload :StartDomain, action_root.join('start_domain') autoload :SuspendDomain, action_root.join('suspend_domain') autoload :TimedProvision, action_root.join('timed_provision') autoload :WaitTillUp, action_root.join('wait_till_up') autoload :PrepareNFSValidIds, action_root.join('prepare_nfs_valid_ids') autoload :Package, 'vagrant/action/general/package' autoload :PackageSetupFiles, 'vagrant/action/general/package_setup_files' autoload :PackageSetupFolders, 
'vagrant/action/general/package_setup_folders' autoload :SSHRun, 'vagrant/action/builtin/ssh_run' autoload :HandleBox, 'vagrant/action/builtin/handle_box' autoload :SyncedFolders, 'vagrant/action/builtin/synced_folders' autoload :SyncedFolderCleanup, 'vagrant/action/builtin/synced_folder_cleanup' autoload :ProvisionerCleanup, 'vagrant/action/builtin/provisioner_cleanup' autoload :WaitForCommunicator, 'vagrant/action/builtin/wait_for_communicator' end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/000077500000000000000000000000001414232526500217735ustar00rootroot00000000000000vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/clean_machine_folder.rb000066400000000000000000000015001414232526500264150ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class CleanMachineFolder def initialize(app, env, options=nil) @logger = Log4r::Logger.new('vagrant_libvirt::action::create_domain') @app = app @ui = env[:ui] @quiet = (options || {}).fetch(:quiet, false) end def call(env) machine_folder = env[:machine].data_dir @ui.info("Deleting the machine folder") unless @quiet @logger.debug("Recursively removing: #{machine_folder}") FileUtils.rm_rf(machine_folder, :secure => true) # need to recreate to prevent exception during a cancelled up FileUtils.mkdir_p(machine_folder) @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/create_domain.rb000066400000000000000000000433211414232526500251150ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class CreateDomain include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::StorageUtil def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::create_domain') @app = app end def _disk_name(name, disk) "#{name}-#{disk[:device]}.#{disk[:type]}" # disk name end def _disks_print(disks) disks.collect do |x| "#{x[:device]}(#{x[:type]},#{x[:size]})" end.join(', ') end def _cdroms_print(cdroms) cdroms.collect { |x| x[:dev] }.join(', ') end def call(env) # Get config. config = env[:machine].provider_config # Gather some info about domain @name = env[:domain_name] @title = config.title vagrantfile = File.join(env[:root_path], (env[:vagrantfile_name] || "Vagrantfile")) @description = !config.description.empty? ? 
config.description : "Source: #{vagrantfile}" @uuid = config.uuid @cpus = config.cpus.to_i @cpuset = config.cpuset @cpu_features = config.cpu_features @cpu_topology = config.cpu_topology @nodeset = config.nodeset @features = config.features @features_hyperv = config.features_hyperv @clock_offset = config.clock_offset @clock_timers = config.clock_timers @shares = config.shares @cpu_mode = config.cpu_mode @cpu_model = config.cpu_model @cpu_fallback = config.cpu_fallback @numa_nodes = config.numa_nodes @loader = config.loader @nvram = config.nvram @machine_type = config.machine_type @machine_arch = config.machine_arch @disk_bus = config.disk_bus @disk_device = config.disk_device @disk_driver_opts = config.disk_driver_opts @nested = config.nested @memory_size = config.memory.to_i * 1024 @memory_backing = config.memory_backing @management_network_mac = config.management_network_mac @domain_volume_cache = config.volume_cache || 'default' @kernel = config.kernel @cmd_line = config.cmd_line @emulator_path = config.emulator_path @initrd = config.initrd @dtb = config.dtb @graphics_type = config.graphics_type @graphics_autoport = config.graphics_autoport @graphics_port = config.graphics_port @graphics_ip = config.graphics_ip @graphics_passwd = if config.graphics_passwd.to_s.empty? '' else "passwd='#{config.graphics_passwd}'" end @graphics_gl = config.graphics_gl @video_type = config.video_type @sound_type = config.sound_type @video_vram = config.video_vram @video_accel3d = config.video_accel3d @keymap = config.keymap @kvm_hidden = config.kvm_hidden @tpm_model = config.tpm_model @tpm_type = config.tpm_type @tpm_path = config.tpm_path @tpm_version = config.tpm_version # Boot order @boot_order = config.boot_order # Storage @storage_pool_name = config.storage_pool_name @snapshot_pool_name = config.snapshot_pool_name @domain_volumes = [] @disks = config.disks @cdroms = config.cdroms # Input @inputs = config.inputs # Channels @channels = config.channels # PCI device passthrough @pcis = config.pcis # Watchdog device @watchdog_dev = config.watchdog_dev # USB controller @usbctl_dev = config.usbctl_dev # USB device passthrough @usbs = config.usbs # Redirected devices @redirdevs = config.redirdevs @redirfilters = config.redirfilters # Additional QEMU commandline arguments @qemu_args = config.qemu_args # Additional QEMU commandline environment variables @qemu_env = config.qemu_env # smartcard device @smartcard_dev = config.smartcard_dev # RNG device passthrough @rng = config.rng config = env[:machine].provider_config @domain_type = config.driver @os_type = 'hvm' # Get path to domain image from the storage pool selected if we have a box. if env[:machine].config.vm.box if @snapshot_pool_name != @storage_pool_name pool_name = @snapshot_pool_name else pool_name = @storage_pool_name end @logger.debug "Search for volumes in pool: #{pool_name}" env[:box_volumes].each_index do |index| suffix_index = index > 0 ? "_#{index}" : '' domain_volume = env[:machine].provider.driver.connection.volumes.all( name: "#{@name}#{suffix_index}.img" ).find { |x| x.pool_name == pool_name } raise Errors::DomainVolumeExists if domain_volume.nil? @domain_volumes.push({ :dev => (index+1).vdev.to_s, :cache => @domain_volume_cache, :bus => @disk_bus, :path => domain_volume.path, :virtual_size => env[:box_volumes][index][:virtual_size] }) end end # If we have a box, take the path from the domain volume and set our storage_prefix. # If not, we dump the storage pool xml to get its defined path. 
# the default storage prefix is typically: /var/lib/libvirt/images/ if env[:machine].config.vm.box storage_prefix = File.dirname(@domain_volumes[0][:path]) + '/' # steal else storage_prefix = get_disk_storage_prefix(env, @storage_pool_name) end @serials = config.serials @serials.each do |serial| next unless serial[:source] && serial[:source][:path] dir = File.dirname(serial[:source][:path]) begin FileUtils.mkdir_p(dir) rescue ::Errno::EACCES raise Errors::SerialCannotCreatePathError, path: dir end end @disks.each do |disk| disk[:path] ||= _disk_name(@name, disk) # On volume creation, the element inside # is oddly ignored; instead the path is taken from the # element: # http://www.redhat.com/archives/libvir-list/2008-August/msg00329.html disk[:name] = disk[:path] disk[:absolute_path] = storage_prefix + disk[:path] if not disk[:pool].nil? disk_pool_name = disk[:pool] @logger.debug "Overriding pool name with: #{disk_pool_name}" disk_storage_prefix = get_disk_storage_prefix(env, disk_pool_name) disk[:absolute_path] = disk_storage_prefix + disk[:path] @logger.debug "Overriding disk path with: #{disk[:absolute_path]}" else disk_pool_name = @storage_pool_name end # make the disk. equivalent to: # qemu-img create -f qcow2 5g begin env[:machine].provider.driver.connection.volumes.create( name: disk[:name], format_type: disk[:type], path: disk[:absolute_path], capacity: disk[:size], owner: storage_uid(env), group: storage_gid(env), #:allocation => ?, pool_name: disk_pool_name ) rescue Libvirt::Error => e # It is hard to believe that e contains just a string # and no useful error code! msg = "Call to virStorageVolCreateXML failed: " + "storage volume '#{disk[:path]}' exists already" if e.message == msg and disk[:allow_existing] disk[:preexisting] = true else raise Errors::FogCreateDomainVolumeError, error_message: e.message end end end # Output the settings we're going to use to the user env[:ui].info(I18n.t('vagrant_libvirt.creating_domain')) env[:ui].info(" -- Name: #{@name}") env[:ui].info(" -- Title: #{@title}") if @title != '' env[:ui].info(" -- Description: #{@description}") if @description != '' env[:ui].info(" -- Forced UUID: #{@uuid}") if @uuid != '' env[:ui].info(" -- Domain type: #{@domain_type}") env[:ui].info(" -- Cpus: #{@cpus}") unless @cpuset.nil? env[:ui].info(" -- Cpuset: #{@cpuset}") end if not @cpu_topology.empty? env[:ui].info(" -- CPU topology: sockets=#{@cpu_topology[:sockets]}, cores=#{@cpu_topology[:cores]}, threads=#{@cpu_topology[:threads]}") end @cpu_features.each do |cpu_feature| env[:ui].info(" -- CPU Feature: name=#{cpu_feature[:name]}, policy=#{cpu_feature[:policy]}") end @features.each do |feature| env[:ui].info(" -- Feature: #{feature}") end @features_hyperv.each do |feature| if feature[:name] == 'spinlocks' env[:ui].info(" -- Feature (HyperV): name=#{feature[:name]}, state=#{feature[:state]}, retries=#{feature[:retries]}") else env[:ui].info(" -- Feature (HyperV): name=#{feature[:name]}, state=#{feature[:state]}") end end env[:ui].info(" -- Clock offset: #{@clock_offset}") @clock_timers.each do |timer| env[:ui].info(" -- Clock timer: #{timer.map { |k,v| "#{k}=#{v}"}.join(', ')}") end env[:ui].info(" -- Memory: #{@memory_size / 1024}M") unless @nodeset.nil? env[:ui].info(" -- Nodeset: #{@nodeset}") end @memory_backing.each do |backing| env[:ui].info(" -- Memory Backing: #{backing[:name]}: #{backing[:config].map { |k,v| "#{k}='#{v}'"}.join(' ')}") end unless @shares.nil? 
env[:ui].info(" -- Shares: #{@shares}") end env[:ui].info(" -- Management MAC: #{@management_network_mac}") env[:ui].info(" -- Loader: #{@loader}") env[:ui].info(" -- Nvram: #{@nvram}") if env[:machine].config.vm.box env[:ui].info(" -- Base box: #{env[:machine].box.name}") end env[:ui].info(" -- Storage pool: #{@storage_pool_name}") @domain_volumes.each do |volume| env[:ui].info(" -- Image(#{volume[:device]}): #{volume[:path]}, #{volume[:virtual_size].to_GB}G") end if not @disk_driver_opts.empty? env[:ui].info(" -- Disk driver opts: #{@disk_driver_opts.reject { |k,v| v.nil? }.map { |k,v| "#{k}='#{v}'"}.join(' ')}") else env[:ui].info(" -- Disk driver opts: cache='#{@domain_volume_cache}'") end env[:ui].info(" -- Kernel: #{@kernel}") env[:ui].info(" -- Initrd: #{@initrd}") env[:ui].info(" -- Graphics Type: #{@graphics_type}") env[:ui].info(" -- Graphics Port: #{@graphics_port}") env[:ui].info(" -- Graphics IP: #{@graphics_ip}") env[:ui].info(" -- Graphics Password: #{@graphics_passwd.empty? ? 'Not defined' : 'Defined'}") env[:ui].info(" -- Video Type: #{@video_type}") env[:ui].info(" -- Video VRAM: #{@video_vram}") env[:ui].info(" -- Video 3D accel: #{@video_accel3d}") env[:ui].info(" -- Sound Type: #{@sound_type}") env[:ui].info(" -- Keymap: #{@keymap}") env[:ui].info(" -- TPM Backend: #{@tpm_type}") if @tpm_type == 'emulator' env[:ui].info(" -- TPM Model: #{@tpm_model}") env[:ui].info(" -- TPM Version: #{@tpm_version}") else env[:ui].info(" -- TPM Path: #{@tpm_path}") end @boot_order.each do |device| env[:ui].info(" -- Boot device: #{device}") end unless @disks.empty? env[:ui].info(" -- Disks: #{_disks_print(@disks)}") end @disks.each do |disk| msg = " -- Disk(#{disk[:device]}): #{disk[:absolute_path]}" msg += ' Shared' if disk[:shareable] msg += ' (Remove only manually)' if disk[:allow_existing] msg += ' Not created - using existed.' if disk[:preexisting] env[:ui].info(msg) end unless @cdroms.empty? env[:ui].info(" -- CDROMS: #{_cdroms_print(@cdroms)}") end @cdroms.each do |cdrom| env[:ui].info(" -- CDROM(#{cdrom[:dev]}): #{cdrom[:path]}") end @inputs.each do |input| env[:ui].info(" -- INPUT: type=#{input[:type]}, bus=#{input[:bus]}") end @channels.each do |channel| env[:ui].info(" -- CHANNEL: type=#{channel[:type]}, mode=#{channel[:source_mode]}") env[:ui].info(" -- CHANNEL: target_type=#{channel[:target_type]}, target_name=#{channel[:target_name]}") end @pcis.each do |pci| env[:ui].info(" -- PCI passthrough: #{pci[:domain]}:#{pci[:bus]}:#{pci[:slot]}.#{pci[:function]}") end unless @rng[:model].nil? env[:ui].info(" -- RNG device model: #{@rng[:model]}") end if not @watchdog_dev.empty? env[:ui].info(" -- Watchdog device: model=#{@watchdog_dev[:model]}, action=#{@watchdog_dev[:action]}") end if not @usbctl_dev.empty? msg = " -- USB controller: model=#{@usbctl_dev[:model]}" msg += ", ports=#{@usbctl_dev[:ports]}" if @usbctl_dev[:ports] env[:ui].info(msg) end @usbs.each do |usb| usb_dev = [] usb_dev.push("bus=#{usb[:bus]}") if usb[:bus] usb_dev.push("device=#{usb[:device]}") if usb[:device] usb_dev.push("vendor=#{usb[:vendor]}") if usb[:vendor] usb_dev.push("product=#{usb[:product]}") if usb[:product] env[:ui].info(" -- USB passthrough: #{usb_dev.join(', ')}") end unless @redirdevs.empty? env[:ui].info(' -- Redirected Devices: ') @redirdevs.each do |redirdev| msg = " -> bus=usb, type=#{redirdev[:type]}" env[:ui].info(msg) end end unless @redirfilters.empty? 
env[:ui].info(' -- USB Device filter for Redirected Devices: ') @redirfilters.each do |redirfilter| msg = " -> class=#{redirfilter[:class]}, " msg += "vendor=#{redirfilter[:vendor]}, " msg += "product=#{redirfilter[:product]}, " msg += "version=#{redirfilter[:version]}, " msg += "allow=#{redirfilter[:allow]}" env[:ui].info(msg) end end if not @smartcard_dev.empty? env[:ui].info(" -- smartcard device: mode=#{@smartcard_dev[:mode]}, type=#{@smartcard_dev[:type]}") end @serials.each_with_index do |serial, port| if serial[:source] env[:ui].info(" -- SERIAL(COM#{port}: redirect to #{serial[:source][:path]}") env[:ui].warn(I18n.t('vagrant_libvirt.warnings.creating_domain_console_access_disabled')) end end unless @qemu_args.empty? env[:ui].info(' -- Command line args: ') @qemu_args.each do |arg| msg = " -> value=#{arg[:value]}, " env[:ui].info(msg) end end unless @qemu_env.empty? env[:ui].info(' -- Command line environment variables: ') @qemu_env.each do |env_var, env_value| msg = " -> #{env_var}=#{env_value}, " env[:ui].info(msg) end end env[:ui].info(" -- Command line : #{@cmd_line}") unless @cmd_line.empty? # Create Libvirt domain. # Is there a way to tell fog to create new domain with already # existing volume? Use domain creation from template.. xml = to_xml('domain') @logger.debug { "Creating Domain with XML:\n#{xml}" } begin server = env[:machine].provider.driver.connection.servers.create( xml: xml ) rescue Fog::Errors::Error => e raise Errors::FogCreateServerError, error_message: e.message end # Immediately save the ID since it is created at this point. env[:machine].id = server.id @app.call(env) end private def get_disk_storage_prefix(env, disk_pool_name) disk_storage_pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name(disk_pool_name) raise Errors::NoStoragePool if disk_storage_pool.nil? xml = Nokogiri::XML(disk_storage_pool.xml_desc) disk_storage_prefix = xml.xpath('/pool/target/path').inner_text.to_s + '/' end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/create_domain_volume.rb000066400000000000000000000072611414232526500265070ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Create a snapshot of base box image. This new snapshot is just new # cow image with backing storage pointing to base box image. Use this # image as new domain volume. class CreateDomainVolume include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::StorageUtil def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::create_domain_volume') @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.creating_domain_volume')) env[:box_volumes].each_index do |index| suffix_index = index > 0 ? "_#{index}" : '' # Get config options. config = env[:machine].provider_config # This is name of newly created image for vm. @name = "#{env[:domain_name]}#{suffix_index}.img" # Verify the volume doesn't exist already. domain_volume = env[:machine].provider.driver.connection.volumes.all( name: @name ).first raise Errors::DomainVolumeExists if domain_volume && domain_volume.id # Get path to backing image - box volume. box_volume = env[:machine].provider.driver.connection.volumes.all( name: env[:box_volumes][index][:name] ).first @backing_file = box_volume.path # Virtual size of image. 
Take value worked out by HandleBoxImage @capacity = env[:box_volumes][index][:virtual_size].to_B # Byte # Create new volume from xml template. Fog currently doesn't support # volume snapshots directly. begin xml = Nokogiri::XML::Builder.new do |xml| xml.volume do xml.name(@name) xml.capacity(@capacity, unit: 'B') xml.target do xml.format(type: 'qcow2') xml.permissions do xml.owner storage_uid(env) xml.group storage_gid(env) xml.label 'virt_image_t' end end xml.backingStore do xml.path(@backing_file) xml.format(type: 'qcow2') xml.permissions do xml.owner storage_uid(env) xml.group storage_gid(env) xml.label 'virt_image_t' end end end end.to_xml( save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION | Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS | Nokogiri::XML::Node::SaveOptions::FORMAT ) if config.snapshot_pool_name != config.storage_pool_name pool_name = config.snapshot_pool_name else pool_name = config.storage_pool_name end @logger.debug { "Creating Volume with XML:\n#{xml}" } @logger.debug "Using pool #{pool_name} for base box snapshot" domain_volume = env[:machine].provider.driver.connection.volumes.create( xml: xml, pool_name: pool_name ) rescue Fog::Errors::Error => e raise Errors::FogDomainVolumeCreateError, error_message: e.message end end @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/create_network_interfaces.rb000066400000000000000000000315241414232526500275440ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'vagrant/util/network_ip' require 'vagrant/util/scoped_hash_override' module VagrantPlugins module ProviderLibvirt module Action # Create network interfaces for domain, before domain is running. # Networks for connecting those interfaces should be already prepared. class CreateNetworkInterfaces include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::NetworkUtil include Vagrant::Util::NetworkIP include Vagrant::Util::ScopedHashOverride def initialize(app, env) @logger = Log4r::Logger.new('vagrant_libvirt::action::create_network_interfaces') @management_network_name = env[:machine].provider_config.management_network_name config = env[:machine].provider_config @nic_model_type = config.nic_model_type || 'virtio' @nic_adapter_count = config.nic_adapter_count @app = app end def call(env) # Get domain first. begin domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid( env[:machine].id.to_s ) rescue => e raise Errors::NoDomainError, error_message: e.message end # Setup list of interfaces before creating them. adapters = [] # Vagrant gives you adapter 0 by default # Assign interfaces to slots. configured_networks(env, @logger).each do |options| # dont need to create interface for this type next if options[:iface_type] == :forwarded_port # TODO: fill first ifaces with adapter option specified. if options[:adapter] if adapters[options[:adapter]] raise Errors::InterfaceSlotNotAvailable end free_slot = options[:adapter].to_i @logger.debug "Using specified adapter slot #{free_slot}" else free_slot = find_empty(adapters) @logger.debug "Adapter not specified so found slot #{free_slot}" raise Errors::InterfaceSlotExhausted if free_slot.nil? end # We have slot for interface, fill it with interface configuration. adapters[free_slot] = options adapters[free_slot][:network_name] = interface_network( env[:machine].provider.driver.connection.client, adapters[free_slot] ) end # Create each interface as new domain device. 
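          # Each adapter slot below is rendered into a libvirt <interface>
          # device (a plain network interface, a public macvtap/bridge
          # interface, or a udp/tcp/mcast tunnel interface, depending on the
          # collected options) and attached to the domain with attach_device.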
@macs_per_network = Hash.new(0) adapters.each_with_index do |iface_configuration, slot_number| @iface_number = slot_number @network_name = iface_configuration[:network_name] @source_options = { network: @network_name } @mac = iface_configuration.fetch(:mac, false) @model_type = iface_configuration.fetch(:model_type, @nic_model_type) @driver_name = iface_configuration.fetch(:driver_name, false) @driver_queues = iface_configuration.fetch(:driver_queues, false) @device_name = iface_configuration.fetch(:iface_name, false) @mtu = iface_configuration.fetch(:mtu, nil) @pci_bus = iface_configuration.fetch(:bus, nil) @pci_slot = iface_configuration.fetch(:slot, nil) template_name = 'interface' @type = nil @udp_tunnel = nil # Configuration for public interfaces which use the macvtap driver if iface_configuration[:iface_type] == :public_network @device = iface_configuration.fetch(:dev, 'eth0') @mode = iface_configuration.fetch(:mode, 'bridge') @type = iface_configuration.fetch(:type, 'direct') @model_type = iface_configuration.fetch(:model_type, @nic_model_type) @driver_name = iface_configuration.fetch(:driver_name, false) @driver_queues = iface_configuration.fetch(:driver_queues, false) @portgroup = iface_configuration.fetch(:portgroup, nil) @network_name = iface_configuration.fetch(:network_name, @network_name) template_name = 'public_interface' @logger.info("Setting up public interface using device #{@device} in mode #{@mode}") @ovs = iface_configuration.fetch(:ovs, false) @ovs_interfaceid = iface_configuration.fetch(:ovs_interfaceid, false) @trust_guest_rx_filters = iface_configuration.fetch(:trust_guest_rx_filters, false) # configuration for udp or tcp tunnel interfaces (p2p conn btwn guest OSes) elsif iface_configuration.fetch(:tunnel_type, nil) @type = iface_configuration.fetch(:tunnel_type) @tunnel_port = iface_configuration.fetch(:tunnel_port, nil) raise Errors::TunnelPortNotDefined if @tunnel_port.nil? if @type == 'udp' # default udp tunnel source to 127.0.0.1 @udp_tunnel={ address: iface_configuration.fetch(:tunnel_local_ip,'127.0.0.1'), port: iface_configuration.fetch(:tunnel_local_port) } end # default mcast tunnel to 239.255.1.1. Web search says this # 239.255.x.x is a safe range to use for general use mcast default_ip = if @type == 'mcast' '239.255.1.1' else '127.0.0.1' end @source_options = { address: iface_configuration.fetch(:tunnel_ip, default_ip), port: @tunnel_port } @tunnel_type = iface_configuration.fetch(:model_type, @nic_model_type) @driver_name = iface_configuration.fetch(:driver_name, false) @driver_queues = iface_configuration.fetch(:driver_queues, false) template_name = 'tunnel_interface' @logger.info("Setting up #{@type} tunnel interface using #{@tunnel_ip} port #{@tunnel_port}") end message = "Creating network interface eth#{@iface_number}" message += " connected to network #{@network_name}." 
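          # A MAC address from the Vagrantfile may be given without
          # separators; it is normalised to colon-separated pairs before
          # being logged and used in the interface XML.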
if @mac @mac = @mac.scan(/(\h{2})/).join(':') message += " Using MAC address: #{@mac}" end @logger.info(message) begin # FIXME: all options for network driver should be hash from Vagrantfile driver_options = {} driver_options[:name] = @driver_name if @driver_name driver_options[:queues] = @driver_queues if @driver_queues @udp_tunnel ||= {} xml = if template_name == 'interface' or template_name == 'tunnel_interface' interface_xml(@type, @source_options, @mac, @device_name, @iface_number, @model_type, @mtu, driver_options, @udp_tunnel, @pci_bus, @pci_slot) else to_xml(template_name) end @logger.debug { "Attaching Network Device with XML:\n#{xml}" } domain.attach_device(xml) rescue => e raise Errors::AttachDeviceError, error_message: e.message end # Re-read the network configuration and grab the MAC address if iface_configuration[:iface_type] == :public_network xml = Nokogiri::XML(domain.xml_desc) source = "@network='#{@network_name}'" if @type == 'direct' source = "@dev='#{@device}'" elsif @portgroup.nil? source = "@bridge='#{@device}'" end if not @mac macs = xml.xpath("/domain/devices/interface[source[#{source}]]/mac/@address") @mac = macs[@macs_per_network[source]] iface_configuration[:mac] = @mac.to_s end @macs_per_network[source] += 1 end end # Continue the middleware chain. @app.call(env) if env[:machine].config.vm.box # Configure interfaces that user requested. Machine should be up and # running now. networks_to_configure = [] adapters.each_with_index do |options, slot_number| # Skip configuring the management network, which is on the first interface. # It's used for provisioning and it has to be available during provisioning, # ifdown command is not acceptable here. next if slot_number.zero? next if options[:auto_config] === false @logger.debug "Configuring interface slot_number #{slot_number} options #{options}" network = { interface: slot_number, use_dhcp_assigned_default_route: options[:use_dhcp_assigned_default_route], mac_address: options[:mac] } if options[:ip] network = { type: :static, ip: options[:ip], netmask: options[:netmask], gateway: options[:gateway] }.merge(network) else network[:type] = :dhcp end networks_to_configure << network end unless networks_to_configure.empty? env[:ui].info I18n.t('vagrant.actions.vm.network.configuring') env[:machine].guest.capability( :configure_networks, networks_to_configure ) end end end private def target_dev_name(device_name, type, iface_number) if device_name device_name elsif type == 'network' "vnet#{iface_number}" else # TODO can we use same name vnet#ifnum? 
#"tnet#{iface_number}" FIXME plugin vagrant-libvirt trying to create second tnet0 interface "vnet#{iface_number}" end end def interface_xml(type, source_options, mac, device_name, iface_number, model_type, mtu, driver_options, udp_tunnel={}, pci_bus, pci_slot) Nokogiri::XML::Builder.new do |xml| xml.interface(type: type || 'network') do xml.alias(name: "ua-net-#{iface_number}") xml.source(source_options) do xml.local(udp_tunnel) if type == 'udp' end xml.mac(address: mac) if mac xml.target(dev: target_dev_name(device_name, type, iface_number)) xml.alias(name: "net#{iface_number}") xml.model(type: model_type.to_s) xml.mtu(size: Integer(mtu)) if mtu xml.driver(driver_options) xml.address(type: 'pci', bus: pci_bus, slot: pci_slot) if pci_bus and pci_slot end end.to_xml( save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION | Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS | Nokogiri::XML::Node::SaveOptions::FORMAT ) end def find_empty(array, start = 0, stop = @nic_adapter_count) (start..stop).each do |i| return i unless array[i] end nil end # Return network name according to interface options. def interface_network(libvirt_client, options) # no need to get interface network for tcp tunnel config return 'tunnel_interface' if options.fetch(:tunnel_type, nil) if options[:network_name] @logger.debug 'Found network by name' return options[:network_name] end # Get list of all (active and inactive) Libvirt networks. available_networks = libvirt_networks(libvirt_client) return 'public' if options[:iface_type] == :public_network if options[:ip] address = network_address(options[:ip], options[:netmask]) available_networks.each do |network| if address == network[:network_address] @logger.debug 'Found network by ip' return network[:name] end end end raise Errors::NetworkNotAvailableError, network_name: options[:ip] end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/create_networks.rb000066400000000000000000000367011414232526500255260ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'vagrant/util/network_ip' require 'vagrant/util/scoped_hash_override' require 'ipaddr' require 'thread' module VagrantPlugins module ProviderLibvirt module Action # Prepare all networks needed for domain connections. class CreateNetworks include Vagrant::Util::NetworkIP include Vagrant::Util::ScopedHashOverride include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::NetworkUtil @@lock = Mutex.new def initialize(app, env) mess = 'vagrant_libvirt::action::create_networks' @logger = Log4r::Logger.new(mess) @app = app @available_networks = [] @options = {} @libvirt_client = env[:machine].provider.driver.connection.client end def call(env) if env[:machine].provider_config.qemu_use_session @app.call(env) return end # only one vm at a time should try to set up networks # otherwise they'll have inconsitent views of current state # and conduct redundant operations that cause errors @@lock.synchronize do # Iterate over networks If some network is not # available, create it if possible. Otherwise raise an error. configured_networks(env, @logger).each do |options| # Only need to create private networks next if options[:iface_type] != :private_network || options.fetch(:tunnel_type, nil) @logger.debug "Searching for network with options #{options}" # should fix other methods so this doesn't have to be instance var @options = options # Get a list of all (active and inactive) Libvirt networks. 
This # list is used throughout this class and should be easier to # process than Libvirt API calls. @available_networks = libvirt_networks( env[:machine].provider.driver.connection.client ) current_network = @available_networks.detect { |network| network[:name] == @options[:network_name] } # Prepare a hash describing network for this specific interface. @interface_network = { name: nil, ip_address: nil, netmask: @options[:netmask], network_address: nil, bridge_name: nil, domain_name: nil, ipv6_address: options[:ipv6_address] || nil, ipv6_prefix: options[:ipv6_prefix] || nil, created: current_network.nil? ? false : true, active: current_network.nil? ? false : current_network[:active], autostart: options[:autostart] || false, guest_ipv6: @options[:guest_ipv6] || 'yes', libvirt_network: current_network.nil? ? nil : current_network[:libvirt_network] } if @options[:ip] handle_ip_option(env) elsif @options[:type].to_s == 'dhcp' handle_dhcp_private_network(env) elsif @options[:network_name] handle_network_name_option(env) else raise Errors::CreateNetworkError, error_message: @options end autostart_network if @interface_network[:autostart] activate_network unless @interface_network[:active] end end @app.call(env) end private def lookup_network_by_ip(ip) @logger.debug "looking up network with ip == #{ip}" @available_networks.find { |network| network[:network_address] == ip } end # Return hash of network for specified name, or nil if not found. def lookup_network_by_name(network_name) @logger.debug "looking up network named #{network_name}" @available_networks.find { |network| network[:name] == network_name } end # Return hash of network for specified bridge, or nil if not found. def lookup_bridge_by_name(bridge_name) @logger.debug "looking up bridge named #{bridge_name}" @available_networks.find { |network| network[:bridge_name] == bridge_name } end # Throw an error if dhcp setting for an existing network does not # match what was configured in the vagrantfile # since we always enable dhcp for the management network # this ensures we wont start a vm vagrant cant reach # Allow the situation where DHCP is not requested (:libvirt__dhcp_enabled == false) # but where it is enabled on the virtual network def verify_dhcp if @interface_network[:dhcp_enabled] == true && @options[:dhcp_enabled] == false raise Errors::DHCPMismatch, network_name: @interface_network[:name], requested: @options[:dhcp_enabled] ? 'enabled' : 'disabled' end end # Handle only situations, when ip is specified. Variables @options and # @available_networks should be filled before calling this function. def handle_ip_option(env) return unless @options[:ip] net_address = nil unless @options[:forward_mode] == 'veryisolated' net_address = network_address(@options[:ip], @options[:netmask]) # Set IP address of network (actually bridge). It will be used as # gateway address for machines connected to this network. @interface_network[:ip_address] = get_host_ip_addr(net_address) end @interface_network[:network_address] = net_address # if network is veryisolated, search by name network = if @options[:libvirt__forward_mode] == 'veryisolated' lookup_network_by_name(@options[:network_name]) elsif net_address # otherwise, search by ip (if set) lookup_network_by_ip(net_address) else # leaving this here to mimic prior behavior. If we get # here, something's probably broken. 
lookup_network_by_name(@options[:network_name]) end @interface_network = network if network verify_dhcp if @interface_network[:created] if @options[:network_name] @logger.debug 'Checking that network name does not clash with ip' if @interface_network[:created] # Just check for mismatch error here - if name and ip from # config match together. if @options[:network_name] != @interface_network[:name] and @qemu_use_agent == false raise Errors::NetworkNameAndAddressMismatch, ip_address: @options[:ip], network_name: @options[:network_name] end else # Network is not created, but name is set. We need to check, # whether network name from config doesn't already exist. if lookup_network_by_name @options[:network_name] raise Errors::NetworkNameAndAddressMismatch, ip_address: @options[:ip], network_name: @options[:network_name] end # Network with 'name' doesn't exist. Set it as name for new # network. @interface_network[:name] = @options[:network_name] end end # Do we need to create new network? unless @interface_network[:created] # TODO: stop after some loops. Don't create infinite loops. # Is name for new network set? If not, generate a unique one. count = 0 while @interface_network[:name].nil? @logger.debug 'generating name for network' # Generate a network name. network_name = env[:root_path].basename.to_s.dup network_name << count.to_s count += 1 # Check if network name is unique. next if lookup_network_by_name(network_name) @interface_network[:name] = network_name end # Generate a unique name for network bridge. @interface_network[:bridge_name] = generate_bridge_name # Create a private network. create_private_network(env) write_created_network(env) else write_created_network(env) unless @options[:always_destroy] == false end end # Handle network_name option, if ip was not specified. Variables # @options and @available_networks should be filled before calling this # function. def handle_network_name_option(env) return if @options[:ip] || \ !@options[:network_name] || \ !@options[:libvirt__forward_mode] == 'veryisolated' network = lookup_network_by_name(@options[:network_name]) @interface_network = network if network if @options[:libvirt__forward_mode] == 'veryisolated' # if this interface has a network address, something's wrong. if @interface_network[:network_address] raise Errors::NetworkNotAvailableError, network_name: @options[:network_name] end else if !@interface_network raise Errors::NetworkNotAvailableError, network_name: @options[:network_name] else verify_dhcp end end # Do we need to create new network? unless @interface_network[:created] @interface_network[:name] = @options[:network_name] @interface_network[:ip_address] ||= @options[:host_ip] # Generate a unique name for network bridge. @interface_network[:bridge_name] = generate_bridge_name # Create a private network. create_private_network(env) write_created_network(env) else write_created_network(env) unless @options[:always_destroy] == false end end def handle_dhcp_private_network(env) net_address = @options[:libvirt__network_address] net_address = '172.28.128.0' unless net_address network = lookup_network_by_ip(net_address) @interface_network = network if network # Do we need to create new network? unless @interface_network[:created] @interface_network[:name] = @options[:network_name] ? @options[:network_name] : 'vagrant-private-dhcp' @interface_network[:network_address] = net_address # Set IP address of network (actually bridge). It will be used as # gateway address for machines connected to this network. 
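              # get_host_ip_addr returns the :host_ip option when one is
              # given, otherwise the first usable address of the network.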
@interface_network[:ip_address] = get_host_ip_addr(net_address) # Generate a unique name for network bridge. @interface_network[:bridge_name] = generate_bridge_name # Create a private network. create_private_network(env) write_created_network(env) else write_created_network(env) unless @options[:always_destroy] == false end end # Return provided address or first address of network otherwise def get_host_ip_addr(network) @options[:host_ip] ? IPAddr.new(@options[:host_ip]) : IPAddr.new(network).succ end # Return the first available virbr interface name def generate_bridge_name @logger.debug 'generating name for bridge' count = 0 while lookup_bridge_by_name(bridge_name = "virbr#{count}") count += 1 end @logger.debug "found available bridge name #{bridge_name}" bridge_name end def create_private_network(env) @network_name = @interface_network[:name] @network_bridge_name = @interface_network[:bridge_name] @network_address = @interface_network[:ip_address] @network_netmask = @interface_network[:netmask] @network_mtu = Integer(@options[:mtu]) if @options[:mtu] @guest_ipv6 = @interface_network[:guest_ipv6] @network_ipv6_address = @interface_network[:ipv6_address] @network_ipv6_prefix = @interface_network[:ipv6_prefix] @network_bridge_stp = @options[:bridge_stp].nil? || @options[:bridge_stp] ? 'on' : 'off' @network_bridge_delay = @options[:bridge_delay] ? @options[:bridge_delay] : 0 @network_forward_mode = @options[:forward_mode] if @options[:forward_device] @network_forward_device = @options[:forward_device] end if @options[:dhcp_enabled] # Find out DHCP addresses pool range. network_address = "#{@interface_network[:network_address]}/#{(@interface_network[:netmask]).to_s}" net = @interface_network[:network_address] ? IPAddr.new(network_address) : nil # First is address of network, second is gateway (by default). # So start the range two addresses after network address by default. # TODO: Detect if this IP is not set on the interface. start_address = @options[:dhcp_start] || net.to_range.begin.succ # Default to last possible address. (Stop address must not be broadcast address.) stop_address = @options[:dhcp_stop] || (net.to_range.end & IPAddr.new('255.255.255.254')) @network_dhcp_enabled = true @network_dhcp_bootp_file = @options[:dhcp_bootp_file] @network_dhcp_bootp_server = @options[:dhcp_bootp_server] @network_range_start = start_address @network_range_stop = stop_address else @network_dhcp_enabled = false end @network_domain_name = @options[:domain_name] begin xml = to_xml('private_network') @logger.debug { "Creating private network with XML:\n#{xml}" } @interface_network[:libvirt_network] = \ @libvirt_client.define_network_xml(xml) @logger.debug 'created network' rescue => e raise Errors::CreateNetworkError, error_message: e.message end end def write_created_network(env) created_networks_file = env[:machine].data_dir + 'created_networks' message = 'Saving information about created network ' \ "#{@interface_network[:name]}, UUID=#{@interface_network[:libvirt_network].uuid} " \ "to file #{created_networks_file}." 
@logger.info(message) File.open(created_networks_file, 'a') do |file| file.puts @interface_network[:libvirt_network].uuid end end def autostart_network @interface_network[:libvirt_network].autostart = true rescue => e raise Errors::AutostartNetworkError, error_message: e.message end def activate_network @interface_network[:libvirt_network].create rescue => e raise Errors::ActivateNetworkError, error_message: e.message end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/destroy_domain.rb000066400000000000000000000062101414232526500253370ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class DestroyDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::destroy_domain') @app = app end def call(env) # Destroy the server, remove the tracking ID env[:ui].info(I18n.t('vagrant_libvirt.destroy_domain')) # Must delete any snapshots before domain can be destroyed # Fog Libvirt currently doesn't support snapshots. Use # ruby-libvirt client directly. Note this is racy, see # http://www.libvirt.org/html/libvirt-libvirt.html#virDomainSnapshotListNames libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid( env[:machine].id ) begin libvirt_domain.list_snapshots.each do |name| @logger.info("Deleting snapshot '#{name}'") begin libvirt_domain.lookup_snapshot_by_name(name).delete rescue => e raise Errors::DeleteSnapshotError, error_message: e.message end end rescue # Some drivers (xen) don't support getting list of snapshots, # not much can be done here about it @logger.warn("Failed to get list of snapshots") end # must remove managed saves libvirt_domain.managed_save_remove if libvirt_domain.has_managed_save? domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) if env[:machine].provider_config.disks.empty? && env[:machine].provider_config.cdroms.empty? # if using default configuration of disks and cdroms # cdroms are consider volumes, but cannot be destroyed domain.destroy(destroy_volumes: true) else domain.destroy(destroy_volumes: false) env[:machine].provider_config.disks.each do |disk| # shared disks remove only manually or ??? next if disk[:allow_existing] diskname = libvirt_domain.name + '-' + disk[:device] + '.' + disk[:type].to_s # diskname is unique libvirt_disk = domain.volumes.select do |x| x.name == diskname end.first if libvirt_disk libvirt_disk.destroy elsif disk[:path] poolname = env[:machine].provider_config.storage_pool_name libvirt_disk = domain.volumes.select do |x| # FIXME: can remove pool/target.img and pool/123/target.img x.path =~ /\/#{disk[:path]}$/ && x.pool_name == poolname end.first libvirt_disk.destroy if libvirt_disk end end # remove root storage root_disk = domain.volumes.select do |x| x.name == libvirt_domain.name + '.img' if x end.first root_disk.destroy if root_disk end @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/destroy_networks.rb000066400000000000000000000070501414232526500257470ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'nokogiri' module VagrantPlugins module ProviderLibvirt module Action # Destroy all networks created for this specific domain. Skip # removing if network has still active connections. 
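      # The candidate networks are read from the 'created_networks' file in
      # the machine's data directory (one libvirt network UUID per line).
      # Networks that no longer exist are skipped, networks that still report
      # connections in their XML are kept (and written back to the file), and
      # the rest are destroyed and undefined.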
class DestroyNetworks def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::destroy_networks') @app = app end def call(env) if env[:machine].provider_config.qemu_use_session @app.call(env) return end # If there were some networks created for this machine, in machines # data directory, created_networks file holds UUIDs of each network. created_networks_file = env[:machine].data_dir + 'created_networks' @logger.info 'Checking if any networks were created' # If created_networks file doesn't exist, there are no networks we # need to remove. unless File.exist?(created_networks_file) env[:machine].id = nil return @app.call(env) end @logger.info 'File with created networks exists' # Iterate over each created network UUID and try to remove it. created_networks = [] file = File.open(created_networks_file, 'r') file.readlines.each do |network_uuid| @logger.info "Checking for #{network_uuid}" # lookup_network_by_uuid throws same exception # if there is an error or if the network just doesn't exist begin libvirt_network = env[:machine].provider.driver.connection.client.lookup_network_by_uuid( network_uuid ) rescue Libvirt::RetrieveError => e # this network is already destroyed, so move on if e.message =~ /Network not found/ @logger.info 'It is already undefined' next # some other error occured, so raise it again else raise e end end # Skip removing if network has still active connections. xml = Nokogiri::XML(libvirt_network.xml_desc) connections = xml.xpath('/network/@connections').first unless connections.nil? @logger.info 'Still has connections so will not undefine' created_networks << network_uuid next end # Shutdown network first. # Undefine network. begin libvirt_network.destroy libvirt_network.undefine @logger.info 'Undefined it' rescue => e raise Errors::DestroyNetworkError, network_name: libvirt_network.name, error_message: e.message end end file.close # Update status of created networks after removing some/all of them. # Not sure why we are doing this, something else seems to always delete the file if !created_networks.empty? File.open(created_networks_file, 'w') do |file| @logger.info 'Writing new created_networks file' created_networks.each do |network_uuid| file.puts network_uuid end end else @logger.info 'Deleting created_networks file' File.delete(created_networks_file) end env[:machine].id = nil @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/forward_ports.rb000066400000000000000000000156671414232526500252320ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action # Adds support for vagrant's `forward_ports` configuration directive. class ForwardPorts @@lock = Mutex.new def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant_libvirt::action::forward_ports') end def call(env) # Get the ports we're forwarding env[:forwarded_ports] = compile_forwarded_ports(env, env[:machine].config) # Warn if we're port forwarding to any privileged ports env[:forwarded_ports].each do |fp| next unless fp[:host] <= 1024 env[:ui].warn I18n.t( 'vagrant.actions.vm.forward_ports.privileged_ports' ) break end # Continue, we need the VM to be booted in order to grab its IP @app.call env if env[:forwarded_ports].any? 
env[:ui].info I18n.t('vagrant.actions.vm.forward_ports.forwarding') forward_ports(env) end end def forward_ports(env) env[:forwarded_ports].each do |fp| message_attributes = { adapter: fp[:adapter] || 'eth0', guest_port: fp[:guest], host_port: fp[:host] } env[:ui].info(I18n.t( 'vagrant.actions.vm.forward_ports.forwarding_entry', **message_attributes )) ssh_pid = redirect_port( env, env[:machine], fp[:host_ip] || '*', fp[:host], fp[:guest_ip] || env[:machine].provider.ssh_info[:host], fp[:guest], fp[:gateway_ports] || false ) store_ssh_pid(env[:machine], fp[:host], ssh_pid) end end private def compile_forwarded_ports(env, config) mappings = {} config.vm.networks.each do |type, options| next if options[:disabled] if options[:protocol] == 'udp' env[:ui].warn I18n.t('vagrant_libvirt.warnings.forwarding_udp') next end next if type != :forwarded_port || ( options[:id] == 'ssh' && !env[:machine].provider_config.forward_ssh_port ) if options.fetch(:host_ip, '').to_s.strip.empty? options.delete(:host_ip) end mappings[options[:host]] = options end mappings.values end def redirect_port(env, machine, host_ip, host_port, guest_ip, guest_port, gateway_ports) ssh_info = machine.ssh_info params = %W( -L #{host_ip}:#{host_port}:#{guest_ip}:#{guest_port} -N #{ssh_info[:host]} ).join(' ') params += ' -g' if gateway_ports options = (%W( User=#{ssh_info[:username]} Port=#{ssh_info[:port]} UserKnownHostsFile=/dev/null ExitOnForwardFailure=yes ControlMaster=no StrictHostKeyChecking=no PasswordAuthentication=no ForwardX11=#{ssh_info[:forward_x11] ? 'yes' : 'no'} IdentitiesOnly=#{ssh_info[:keys_only] ? 'yes' : 'no'} ) + ssh_info[:private_key_path].map do |pk| "IdentityFile='\"#{pk}\"'" end).map { |s| "-o #{s}" }.join(' ') options += " -o ProxyCommand=\"#{ssh_info[:proxy_command]}\"" if machine.provider_config.proxy_command # TODO: instead of this, try and lock and get the stdin from spawn... ssh_cmd = '' if host_port <= 1024 @@lock.synchronize do # TODO: add i18n env[:ui].info 'Requesting sudo for host port(s) <= 1024' r = system('sudo -v') if r ssh_cmd += 'sudo ' # add sudo prefix end end end ssh_cmd += "ssh -n #{options} #{params}" @logger.debug "Forwarding port with `#{ssh_cmd}`" log_file = ssh_forward_log_file( env[:machine], host_ip, host_port, guest_ip, guest_port, ) @logger.info "Logging to #{log_file}" spawn(ssh_cmd, [:out, :err] => [log_file, 'w'], :pgroup => true) end def ssh_forward_log_file(machine, host_ip, host_port, guest_ip, guest_port) log_dir = machine.data_dir.join('logs') log_dir.mkdir unless log_dir.directory? File.join( log_dir, 'ssh-forwarding-%s_%s-%s_%s.log' % [host_ip, host_port, guest_ip, guest_port] ) end def store_ssh_pid(machine, host_port, ssh_pid) data_dir = machine.data_dir.join('pids') data_dir.mkdir unless data_dir.directory? data_dir.join("ssh_#{host_port}.pid").open('w') do |pid_file| pid_file.write(ssh_pid) end end end end end end module VagrantPlugins module ProviderLibvirt module Action # Cleans up ssh-forwarded ports on VM halt/destroy. class ClearForwardedPorts @@lock = Mutex.new def initialize(app, _env) @app = app @logger = Log4r::Logger.new( 'vagrant_libvirt::action::clear_forward_ports' ) end def call(env) pids = ssh_pids(env[:machine]) if pids.any? 
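            # Tunnels for host ports at or below 1024 were started with sudo,
            # so killing those ssh processes is also prefixed with sudo.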
env[:ui].info I18n.t( 'vagrant.actions.vm.clear_forward_ports.deleting' ) pids.each do |tag| next unless ssh_pid?(tag[:pid]) @logger.debug "Killing pid #{tag[:pid]}" kill_cmd = '' if tag[:port] <= 1024 kill_cmd += 'sudo ' # add sudo prefix end kill_cmd += "kill #{tag[:pid]}" @@lock.synchronize do system(kill_cmd) end end @logger.info 'Removing ssh pid files' remove_ssh_pids(env[:machine]) else @logger.info 'No ssh pids found' end @app.call env end protected def ssh_pids(machine) glob = machine.data_dir.join('pids').to_s + '/ssh_*.pid' ssh_pids = Dir[glob].map do |file| { pid: File.read(file).strip.chomp, port: File.basename(file)['ssh_'.length..-1 * ('.pid'.length + 1)].to_i } end end def ssh_pid?(pid) @logger.debug "Checking if #{pid} is an ssh process "\ "with `ps -o command= #{pid}`" `ps -o command= #{pid}`.strip.chomp =~ /ssh/ end def remove_ssh_pids(machine) glob = machine.data_dir.join('pids').to_s + '/ssh_*.pid' Dir[glob].each do |file| File.delete file end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/halt_domain.rb000066400000000000000000000012051414232526500245750ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Halt the domain. class HaltDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::halt_domain') @app = app end def call(env) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) if env[:machine].state.id == :running env[:ui].info(I18n.t('vagrant_libvirt.halt_domain')) domain.poweroff end @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/handle_box_image.rb000066400000000000000000000237311414232526500255730ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'open3' require 'json' require 'vagrant-libvirt/util/byte_number' module VagrantPlugins module ProviderLibvirt module Action class HandleBoxImage include VagrantPlugins::ProviderLibvirt::Util::StorageUtil include VagrantPlugins::ProviderLibvirt::Util::Ui @@lock = Mutex.new def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::handle_box_image') @app = app end def call(env) # Handle box formats converting between v1 => v2 and ensuring # any obsolete settings are rejected. disks = env[:machine].box.metadata.fetch('disks', []) if disks.empty? # Handle box v1 format # Only qcow2 format is supported in v1, but other formats with backing # store capability should be usable. box_format = env[:machine].box.metadata['format'] HandleBoxImage.verify_box_format(box_format) image_path = HandleBoxImage.get_box_image_path(env[:machine].box, 'box.img') env[:box_volume_number] = 1 env[:box_volumes] = [{ :path => image_path, :name => HandleBoxImage.get_volume_name(env[:machine].box, 'box', image_path, env[:ui]), :virtual_size => HandleBoxImage.get_virtual_size(env), :format => box_format, }] else # Handle box v2 format # { # 'path': '', # 'name': '' # optional, will use index # } # env[:box_volume_number] = disks.length() target_volumes = Hash[] env[:box_volumes] = Array.new(env[:box_volume_number]) { |i| raise Errors::BoxFormatMissingAttribute, attribute: "disks[#{i}]['path']" if disks[i]['path'].nil? 
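            # Illustrative v2 metadata.json shape (hypothetical values):
            #   { "disks": [ { "path": "box.img", "name": "root" },
            #                { "path": "disk2.img" } ] }
            # Each disk's format and virtual size are probed with
            # `qemu-img info`; volume names must not collide (checked below).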
image_path = HandleBoxImage.get_box_image_path(env[:machine].box, disks[i]['path']) format, virtual_size = HandleBoxImage.get_box_disk_settings(image_path) volume_name = HandleBoxImage.get_volume_name( env[:machine].box, disks[i].fetch('name', disks[i]['path'].sub(/#{File.extname(disks[i]['path'])}$/, '')), image_path, env[:ui], ) # allowing name means needing to check that it doesn't cause a clash existing = target_volumes[volume_name] if !existing.nil? raise Errors::BoxFormatDuplicateVolume, volume: volume_name, new_disk: "disks[#{i}]", orig_disk: "disks[#{existing}]" end target_volumes[volume_name] = i { :path => image_path, :name => volume_name, :virtual_size => virtual_size, :format => HandleBoxImage.verify_box_format(format) } } end # Get config options config = env[:machine].provider_config box_image_files = [] env[:box_volumes].each do |d| box_image_files.push(d[:path]) end # Override box_virtual_size box_virtual_size = env[:box_volumes][0][:virtual_size] if config.machine_virtual_size config_machine_virtual_size = ByteNumber.from_GB(config.machine_virtual_size) if config_machine_virtual_size < box_virtual_size # Warn that a virtual size less than the box metadata size # is not supported and will be ignored env[:ui].warn I18n.t( 'vagrant_libvirt.warnings.ignoring_virtual_size_too_small', requested: config_machine_virtual_size.to_GB, minimum: box_virtual_size.to_GB ) else env[:ui].info I18n.t('vagrant_libvirt.manual_resize_required') box_virtual_size = config_machine_virtual_size end end # save for use by later actions env[:box_volumes][0][:virtual_size] = box_virtual_size # while inside the synchronize block take care not to call the next # action in the chain, as must exit this block first to prevent # locking all subsequent actions as well. @@lock.synchronize do env[:box_volumes].each_index do |i| # Don't continue if image already exists in storage pool. box_volume = env[:machine].provider.driver.connection.volumes.all( name: env[:box_volumes][i][:name] ).first next if box_volume && box_volume.id send_box_image(env, config, box_image_files[i], env[:box_volumes][i]) end end @app.call(env) end protected def self.get_volume_name(box, name, path, ui) version = begin box.version.to_s rescue '' end if version.empty? ui.warn(I18n.t('vagrant_libvirt.box_version_missing', name: box.name.to_s)) version = "0_#{File.mtime(path).to_i}" end vol_name = box.name.to_s.dup.gsub('/', '-VAGRANTSLASH-') vol_name << "_vagrant_box_image_#{version}_#{name.dup.gsub('/', '-SLASH-')}.img" end def self.get_virtual_size(env) # Virtual size has to be set for allocating space in storage pool. box_virtual_size = env[:machine].box.metadata['virtual_size'] raise Errors::NoBoxVirtualSizeSet if box_virtual_size.nil? return ByteNumber.from_GB(box_virtual_size) end def self.get_box_image_path(box, box_name) return box.directory.join(box_name).to_s end def self.verify_box_format(box_format, disk_index=nil) if box_format.nil? raise Errors::NoBoxFormatSet elsif box_format != 'qcow2' if disk_index.nil? raise Errors::WrongBoxFormatSet else raise Errors::WrongDiskFormatSet, disk_index: disk_index end end return box_format end def self.get_box_disk_settings(image_path) stdout, stderr, status = Open3.capture3('qemu-img', 'info', '--output=json', image_path) if !status.success? 
raise Errors::BadBoxImage, image: image_path, out: stdout, err: stderr end image_info = JSON.parse(stdout) format = image_info['format'] virtual_size = ByteNumber.new(image_info['virtual-size']) return format, virtual_size end def send_box_image(env, config, box_image_file, box_volume) # Box is not available as a storage pool volume. Create and upload # it as a copy of local box image. env[:ui].info(I18n.t('vagrant_libvirt.uploading_volume')) # Create new volume in storage pool unless File.exist?(box_image_file) raise Vagrant::Errors::BoxNotFound, name: env[:machine].box.name end box_image_size = File.size(box_image_file) # B message = "Creating volume #{box_volume[:name]} in storage pool #{config.storage_pool_name}." @logger.info(message) begin fog_volume = env[:machine].provider.driver.connection.volumes.create( name: box_volume[:name], allocation: "#{box_image_size / 1024 / 1024}M", capacity: "#{box_volume[:virtual_size].to_B}B", format_type: box_volume[:format], owner: storage_uid(env), group: storage_gid(env), pool_name: config.storage_pool_name ) rescue Fog::Errors::Error => e raise Errors::FogCreateVolumeError, error_message: e.message end # Upload box image to storage pool ret = upload_image(box_image_file, config.storage_pool_name, box_volume[:name], env) do |progress| rewriting(env[:ui]) do |ui| ui.clear_line ui.report_progress(progress, box_image_size, false) end end # Clear the line one last time since the progress meter doesn't # disappear immediately. rewriting(env[:ui]) {|ui| ui.clear_line} # If upload failed or was interrupted, remove created volume from # storage pool. if env[:interrupted] || !ret begin fog_volume.destroy rescue nil end end end # Fog Libvirt currently doesn't support uploading images to storage # pool volumes. Use ruby-libvirt client instead. def upload_image(image_file, pool_name, volume_name, env) image_size = File.size(image_file) # B begin pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name( pool_name ) volume = pool.lookup_volume_by_name(volume_name) stream = env[:machine].provider.driver.connection.client.stream volume.upload(stream, offset = 0, length = image_size) # Exception ProviderLibvirt::RetrieveError can be raised if buffer is # longer than length accepted by API send function. # # TODO: How to find out if buffer is too large and what is the # length that send function will accept? buf_size = 1024 * 250 # 250K progress = 0 open(image_file, 'rb') do |io| while (buff = io.read(buf_size)) sent = stream.send buff progress += sent yield progress end end rescue => e raise Errors::ImageUploadError, error_message: e.message end progress == image_size end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/handle_storage_pool.rb000066400000000000000000000044121414232526500263310ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class HandleStoragePool include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate include VagrantPlugins::ProviderLibvirt::Util::StorageUtil @@lock = Mutex.new def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::handle_storage_pool') @app = app end def call(env) # Get config options. config = env[:machine].provider_config # while inside the synchronize block take care not to call the next # action in the chain, as must exit this block first to prevent # locking all subsequent actions as well. 
@@lock.synchronize do # Check for storage pool, where box image should be created break unless env[:machine].provider.driver.connection.pools.all( name: config.storage_pool_name ).empty? @logger.info("No storage pool '#{config.storage_pool_name}' is available.") # If user specified other pool than default, don't create default # storage pool, just write error message. raise Errors::NoStoragePool if config.storage_pool_name != 'default' @logger.info("Creating storage pool 'default'") # Fog Libvirt currently doesn't support creating pools. Use # ruby-libvirt client directly. begin @storage_pool_path = storage_pool_path(env) @storage_pool_uid = storage_uid(env) @storage_pool_gid = storage_gid(env) xml = to_xml('default_storage_pool') @logger.debug { "Creating Storage Pool with XML:\n#{xml}" } libvirt_pool = env[:machine].provider.driver.connection.client.define_storage_pool_xml( xml ) libvirt_pool.build libvirt_pool.create rescue => e raise Errors::CreatingStoragePoolError, error_message: e.message end raise Errors::NoStoragePool unless libvirt_pool end @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/is_created.rb000066400000000000000000000007101414232526500244200ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action # This can be used with "Call" built-in to check if the machine # is created and branch in the middleware. class IsCreated def initialize(app, _env) @app = app end def call(env) env[:result] = env[:machine].state.id != :not_created @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/is_running.rb000066400000000000000000000007051414232526500244750ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action # This can be used with "Call" built-in to check if the machine # is running and branch in the middleware. class IsRunning def initialize(app, _env) @app = app end def call(env) env[:result] = env[:machine].state.id == :running @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/is_suspended.rb000066400000000000000000000030211414232526500250010ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action # This can be used with "Call" built-in to check if the machine # is suspended and branch in the middleware. class IsSuspended def initialize(app, _env) @app = app end def call(env) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? config = env[:machine].provider_config libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) if config.suspend_mode == 'managedsave' if libvirt_domain.has_managed_save? env[:result] = env[:machine].state.id == :shutoff else env[:result] = env[:machine].state.id == :paused if env[:result] env[:ui].warn('One time switching to pause suspend mode, found a paused VM.') config.suspend_mode = 'pause' end end else if libvirt_domain.has_managed_save? 
env[:ui].warn('One time switching to managedsave suspend mode, state found.') env[:result] = [:shutoff, :paused].include?(env[:machine].state.id) config.suspend_mode = 'managedsave' else env[:result] = env[:machine].state.id == :paused end end @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/message_already_created.rb000066400000000000000000000005401414232526500271330ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action class MessageAlreadyCreated def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.already_created')) @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/message_not_created.rb000066400000000000000000000005301414232526500263110ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action class MessageNotCreated def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.not_created')) @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/message_not_running.rb000066400000000000000000000005301414232526500263620ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action class MessageNotRunning def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.not_running')) @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/message_not_suspended.rb000066400000000000000000000005341414232526500267000ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action class MessageNotSuspended def initialize(app, _env) @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.not_suspended')) @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/message_will_not_destroy.rb000066400000000000000000000006401414232526500274240ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action class MessageWillNotDestroy def initialize(app, env) @app = app end def call(env) env[:ui].info I18n.t("vagrant.commands.destroy.will_not_destroy", name: env[:machine].name) @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/package_domain.rb000066400000000000000000000166231414232526500252520ustar00rootroot00000000000000# frozen_string_literal: true require 'fileutils' require 'log4r' class String def unindent gsub(/^#{scan(/^\s*/).min_by{|l|l.length}}/, "") end end module VagrantPlugins module ProviderLibvirt module Action # Action for create new box for Libvirt provider class PackageDomain include VagrantPlugins::ProviderLibvirt::Util::Ui def initialize(app, env) @logger = Log4r::Logger.new('vagrant_libvirt::action::package_domain') @app = app @options = ENV.fetch('VAGRANT_LIBVIRT_VIRT_SYSPREP_OPTIONS', '') @operations = ENV.fetch('VAGRANT_LIBVIRT_VIRT_SYSPREP_OPERATIONS', 'defaults,-ssh-userdir,-ssh-hostkeys,-customize') end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.package_domain')) libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid( env[:machine].id ) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) volumes = domain.volumes.select { |x| !x.nil? 
} root_disk = volumes.select do |x| x.name == libvirt_domain.name + '.img' end.first raise Errors::NoDomainVolume if root_disk.nil? package_func = method(:package_v1) box_format = ENV.fetch('VAGRANT_LIBVIRT_BOX_FORMAT_VERSION', nil) case box_format when nil if volumes.length() > 1 msg = "Detected more than one volume for machine, in the future this will switch to using the v2 " msg += "box format v2 automatically." msg += "\nIf you want to include the additional disks attached when packaging please set the " msg += "env variable VAGRANT_LIBVIRT_BOX_FORMAT_VERSION=v2 to use the new format. If you want " msg += "to ensure that your box uses the old format for single disk only, please set the " msg += "environment variable explicitly to 'v1'" env[:ui].warn(msg) end when 'v2' package_func = method(:package_v2) when 'v1' else env[:ui].warn("Unrecognized value for 'VAGRANT_LIBVIRT_BOX_FORMAT_VERSION', defaulting to v1") end metadata = package_func.call(env, volumes) # metadata / Vagrantfile package_directory = env["package.directory"] File.write(package_directory + '/metadata.json', metadata) File.write(package_directory + '/Vagrantfile', vagrantfile_content(env)) @app.call(env) end def package_v1(env, volumes) domain_img = download_volume(env, volumes.first, 'box.img') sysprep_domain(domain_img) sparsify_volume(domain_img) info = JSON.parse(`qemu-img info --output=json #{domain_img}`) img_size = (Float(info['virtual-size'])/(1024**3)).ceil return metadata_content_v1(img_size) end def package_v2(env, volumes) disks = [] volumes.each_with_index do |vol, idx| disk = {:path => "box_#{idx+1}.img"} volume_img = download_volume(env, vol, disk[:path]) if idx == 0 sysprep_domain(volume_img) end sparsify_volume(volume_img) disks.push(disk) end return metadata_content_v2(disks) end def vagrantfile_content(env) include_vagrantfile = "" if env["package.vagrantfile"] include_vagrantfile = <<-EOF # Load include vagrant file if it exists after the auto-generated # so it can override any of the settings include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__) load include_vagrantfile if File.exist?(include_vagrantfile) EOF end <<-EOF.unindent Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.driver = "kvm" end #{include_vagrantfile} end EOF end def metadata_content_v1(filesize) <<-EOF.unindent { "provider": "libvirt", "format": "qcow2", "virtual_size": #{filesize} } EOF end def metadata_content_v2(disks) data = { "provider": "libvirt", "format": "qcow2", "disks": disks.each do |disk| {'path': disk[:path]} end } JSON.pretty_generate(data) end protected def sparsify_volume(volume_img) `virt-sparsify --in-place #{volume_img}` end def sysprep_domain(domain_img) # remove hw association with interface # working for centos with lvs default disks `virt-sysprep --no-logfile --operations #{@operations} -a #{domain_img} #{@options}` end def download_volume(env, volume, disk_path) package_directory = env["package.directory"] volume_img = package_directory + '/' + disk_path env[:ui].info("Downloading #{volume.name} to #{volume_img}") download_image(volume_img, env[:machine].provider_config.storage_pool_name, volume.name, env) do |progress,image_size| rewriting(env[:ui]) do |ui| ui.clear_line ui.report_progress(progress, image_size, false) end end # Clear the line one last time since the progress meter doesn't # disappear immediately. 
rewriting(env[:ui]) {|ui| ui.clear_line} # Prep domain disk backing = `qemu-img info "#{volume_img}" | grep 'backing file:' | cut -d ':' -f2`.chomp if backing env[:ui].info('Image has backing image, copying image and rebasing ...') `qemu-img rebase -p -b "" #{volume_img}` end return volume_img end # Fog libvirt currently doesn't support downloading images from storage # pool volumes. Use ruby-libvirt client instead. def download_image(image_file, pool_name, volume_name, env) begin pool = env[:machine].provider.driver.connection.client.lookup_storage_pool_by_name( pool_name ) volume = pool.lookup_volume_by_name(volume_name) image_size = volume.info.allocation # B stream = env[:machine].provider.driver.connection.client.stream # Use length of 0 to download remaining contents after offset volume.download(stream, offset = 0, length = 0) buf_size = 1024 * 250 # 250K, copied from upload_image in handle_box_image.rb progress = 0 retval = stream.recv(buf_size) open(image_file, 'wb') do |io| while (retval.at(0) > 0) recvd = io.write(retval.at(1)) progress += recvd yield [progress, image_size] retval = stream.recv(buf_size) end end rescue => e raise Errors::ImageDownloadError, volume_name: volume_name, pool_name: pool_name, error_message: e.message end progress == image_size end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/prepare_nfs_settings.rb000066400000000000000000000047701414232526500265540ustar00rootroot00000000000000# frozen_string_literal: true require 'nokogiri' require 'socket' require 'timeout' require 'vagrant-libvirt/util/nfs' module VagrantPlugins module ProviderLibvirt module Action class PrepareNFSSettings include VagrantPlugins::ProviderLibvirt::Util::Nfs def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant::action::vm::nfs') end def call(env) @machine = env[:machine] @app.call(env) if using_nfs? 
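            # The guest IP is needed to scope the NFS export, and the host IP
            # (found by opening a UDP socket towards the guest) is the address
            # the guest will mount the export from.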
@logger.info('Using NFS, preparing NFS settings by reading host IP and machine IP') env[:nfs_machine_ip] = read_machine_ip(env[:machine]) env[:nfs_host_ip] = read_host_ip(env[:nfs_machine_ip]) @logger.info("host IP: #{env[:nfs_host_ip]} machine IP: #{env[:nfs_machine_ip]}") raise Vagrant::Errors::NFSNoHostonlyNetwork if !env[:nfs_machine_ip] || !env[:nfs_host_ip] end end # Returns the IP address of the host # # @param [Machine] machine # @return [String] def read_host_ip(ip) UDPSocket.open do |s| if ip.is_a?(Array) s.connect(ip.last, 1) else s.connect(ip, 1) end s.addr.last end end # Returns the IP address of the guest # # @param [Machine] machine # @return [String] def read_machine_ip(machine) # check host only ip ssh_host = machine.ssh_info[:host] return ssh_host if ping(ssh_host) # check other ips command = "ip=$(which ip); ${ip:-/sbin/ip} addr show | grep -i 'inet ' | grep -v '127.0.0.1' | tr -s ' ' | cut -d' ' -f3 | cut -d'/' -f 1" result = '' machine.communicate.execute(command) do |type, data| result += data if type == :stdout end ips = result.chomp.split("\n").uniq @logger.info("guest IPs: #{ips.join(', ')}") ips.each do |ip| next if ip == ssh_host return ip if ping(ip) end end private # Check if we can open a connection to the host def ping(host, timeout = 3) ::Timeout.timeout(timeout) do s = TCPSocket.new(host, 'ssh') s.close end true rescue Errno::ECONNREFUSED true rescue Timeout::Error, StandardError false end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/prepare_nfs_valid_ids.rb000066400000000000000000000006731414232526500266500ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Action class PrepareNFSValidIds def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant::action::vm::nfs') end def call(env) env[:nfs_valid_ids] = env[:machine].provider.driver.connection.servers.all.map(&:id) @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/prune_nfs_exports.rb000066400000000000000000000017651414232526500261140ustar00rootroot00000000000000# frozen_string_literal: true require 'vagrant-libvirt/util/nfs' require 'yaml' module VagrantPlugins module ProviderLibvirt module Action class PruneNFSExports include VagrantPlugins::ProviderLibvirt::Util::Nfs def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::prune_nfs_exports') @app = app end def call(env) @machine = env[:machine] if using_nfs? @logger.info('Using NFS, prunning NFS settings from host') if env[:host] uuid = env[:machine].id # get all uuids uuids = env[:machine].provider.driver.connection.servers.all.map(&:id) # not exiisted in array will removed from nfs uuids.delete(uuid) env[:host].capability( :nfs_prune, env[:machine].ui, uuids ) end end @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/read_mac_addresses.rb000066400000000000000000000021401414232526500261050ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class ReadMacAddresses def initialize(app, _env) @app = app @logger = Log4r::Logger.new('vagrant_libvirt::action::read_mac_addresses') end def call(env) env[:machine_mac_addresses] = read_mac_addresses(env[:machine].provider.driver.connection, env[:machine]) end def read_mac_addresses(libvirt, machine) return nil if machine.id.nil? domain = libvirt.client.lookup_domain_by_uuid(machine.id) if domain.nil? 
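            # Clearing machine.id marks the machine as not created so Vagrant
            # stops tracking the stale domain.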
@logger.info('Machine could not be found, assuming it got destroyed') machine.id = nil return nil end xml = Nokogiri::XML(domain.xml_desc) mac = xml.xpath('/domain/devices/interface/mac/@address') return {} if mac.empty? Hash[mac.each_with_index.map do |x, i| @logger.debug("interface[#{i}] = #{x.value}") [i, x.value] end] end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/remove_libvirt_image.rb000066400000000000000000000011611414232526500265110ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class RemoveLibvirtImage def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::remove_libvirt_image') @app = app end def call(env) env[:ui].info('Vagrant-libvirt plugin removed box only from your LOCAL ~/.vagrant/boxes directory') env[:ui].info('From Libvirt storage pool you have to delete image manually(virsh, virt-manager or by any other tool)') @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/remove_stale_volume.rb000066400000000000000000000035741414232526500264050ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' # require 'log4r/yamlconfigurator' module VagrantPlugins module ProviderLibvirt module Action class RemoveStaleVolume def initialize(app, _env) # log4r_config= YAML.load_file(File.join(File.dirname(__FILE__),"log4r.yaml")) # log_cfg = Log4r::YamlConfigurator # log_cfg.decode_yaml( log4r_config['log4r_config'] ) @logger = Log4r::Logger.new('vagrant_libvirt::action::remove_stale_volume') @app = app end def call(env) # Remove stale server volume config = env[:machine].provider_config # Check for storage pool, where box image should be created fog_pool = env[:machine].provider.driver.connection.pools.all( name: config.storage_pool_name ).first env[:result] = nil if not fog_pool @logger.debug("**** Pool #{config.storage_pool_name} not found") return @app.call(env) end @logger.debug("**** Pool #{fog_pool.name}") # This is name of newly created image for vm. name = "#{env[:domain_name]}.img" @logger.debug("**** Volume name #{name}") # remove root storage box_volume = env[:machine].provider.driver.connection.volumes.all( name: name ).find { |x| x.pool_name == fog_pool.name } if box_volume && box_volume.id env[:ui].info(I18n.t('vagrant_libvirt.remove_stale_volume')) @logger.info("Deleting volume #{box_volume.key}") box_volume.destroy env[:result] = box_volume else @logger.debug("**** Volume #{name} not found in pool #{fog_pool.name}") end # Continue the middleware chain. @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/resume_domain.rb000066400000000000000000000017311414232526500251510ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Resume suspended domain. class ResumeDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::resume_domain') @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.resuming_domain')) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? 
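          # A machine suspended via managedsave is actually shut off (its state
          # was saved to disk), so it has to be started; a paused machine is
          # simply resumed in place.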
libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) config = env[:machine].provider_config if config.suspend_mode == 'managedsave' domain.start else domain.resume end @logger.info("Machine #{env[:machine].id} is resumed.") @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/set_boot_order.rb000066400000000000000000000101031414232526500253240ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'nokogiri' module VagrantPlugins module ProviderLibvirt module Action # boot order useful for pxe in discovery workflow class SetBootOrder def initialize(app, env) @app = app @logger = Log4r::Logger.new('vagrant_libvirt::action::set_boot_order') config = env[:machine].provider_config @boot_order = config.boot_order end def call(env) # Get domain first begin domain = env[:machine].provider .driver .connection .client .lookup_domain_by_uuid( env[:machine].id.to_s ) rescue => e raise Errors::NoDomainError, error_message: e.message end # Only execute specific boot ordering if this is defined # in the Vagrant file if @boot_order.count >= 1 # If a domain is initially defined with no box or disk or # with an explicit boot order, Libvirt adds # This conflicts with an explicit boot_order configuration, # so we need to remove it from the domain xml and feed it back. # Also see https://bugzilla.redhat.com/show_bug.cgi?id=1248514 # as to why we have to do this after all devices have been defined. xml = Nokogiri::XML(domain.xml_desc) xml.search('/domain/os/boot').each(&:remove) # Parse the XML and find each defined drive and network interfacee hd = xml.search("/domain/devices/disk[@device='disk']") cdrom = xml.search("/domain/devices/disk[@device='cdrom']") # implemented only for 1 network nets = @boot_order.flat_map do |x| x.class == Hash ? x : nil end.compact raise 'Defined only for 1 network for boot' if nets.size > 1 network = search_network(nets, xml) # Generate an array per device group and a flattened # array from all of those devices = { 'hd' => hd, 'cdrom' => cdrom, 'network' => network } final_boot_order = final_boot_order(@boot_order, devices) # Loop over the entire defined boot order array and # create boot order entries in the domain XML final_boot_order.each_with_index do |node, index| boot = "" node.add_child(boot) logger_msg(node, index) end # Finally redefine the domain XML through Libvirt # to apply the boot ordering env[:machine].provider .driver .connection .client .define_domain_xml(xml.to_s) end @app.call(env) end def final_boot_order(boot_order, devices) boot_order.flat_map do |category| devices[category.class == Hash ? category.keys.first : category] end end def search_network(nets, xml) str = '/domain/devices/interface' str += "[(@type='network' or @type='udp' or @type='bridge' or @type='direct')" unless nets.empty? 
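            # Illustrative XPath for a boot_order entry like
            # { 'network' => 'vagrant-libvirt' }:
            #   /domain/devices/interface[(@type='network' or @type='udp' or
            #     @type='bridge' or @type='direct')
            #     and source[@network='vagrant-libvirt']]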
net = nets.first network = net['network'] dev = net['dev'] str += " and source[@network='#{network}']" if network str += " and source[@dev='#{dev}']" if dev end str += ']' @logger.debug(str) xml.search(str) end def logger_msg(node, index) name = if node.name == 'disk' node['device'] elsif node.name == 'interface' node.name end @logger.debug "Setting #{name} to boot index #{index + 1}" end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/set_name_of_domain.rb000066400000000000000000000037041414232526500261320ustar00rootroot00000000000000# frozen_string_literal: true require 'securerandom' module VagrantPlugins module ProviderLibvirt module Action # Setup name for domain and domain volumes. class SetNameOfDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::set_name_of_domain') @app = app end def call(env) env[:domain_name] = build_domain_name(env) begin @logger.info("Looking for domain #{env[:domain_name]}") # Check if the domain name is not already taken domain = env[:machine].provider.driver.connection.servers.all( name: env[:domain_name] ) rescue Libvirt::RetrieveError => e @logger.info(e.to_s) domain = nil end unless domain.nil? raise ProviderLibvirt::Errors::DomainNameExists, domain_name: env[:domain_name] end @app.call(env) end # build domain name # random_hostname option avoids # `domain about to create is already taken` # parsable and sortable by epoch time # @example # development-centos-6-chef-11_1404488971_3b7a569e2fd7c554b852 # @return [String] Libvirt domain name def build_domain_name(env) config = env[:machine].provider_config domain_name = if config.default_prefix.nil? env[:root_path].basename.to_s.dup.concat('_') elsif config.default_prefix.empty? # don't have any prefix, not even "_" String.new else config.default_prefix.to_s.dup end domain_name << env[:machine].name.to_s domain_name.gsub!(/[^-a-z0-9_\.]/i, '') domain_name << "_#{Time.now.utc.to_i}_#{SecureRandom.hex(10)}" if config.random_hostname domain_name end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/share_folders.rb000066400000000000000000000037421414232526500251460ustar00rootroot00000000000000# frozen_string_literal: true require 'pathname' require 'log4r' module VagrantPlugins module ProviderLibvirt module Action class ShareFolders def initialize(app, _env) @logger = Log4r::Logger.new('vagrant::action::vm::share_folders') @app = app end def call(env) @env = env prepare_folders create_metadata @app.call(env) end # This method returns an actual list of shared # folders to create and their proper path. def shared_folders {}.tap do |result| @env[:machine].config.vm.synced_folders.each do |id, data| # Ignore NFS shared folders next if !data[:type] == :nfs # This to prevent overwriting the actual shared folders data result[id] = data.dup end end end # Prepares the shared folders by verifying they exist and creating them # if they don't. def prepare_folders shared_folders.each do |_id, options| hostpath = Pathname.new(options[:hostpath]).expand_path(@env[:root_path]) next unless !hostpath.directory? && options[:create] # Host path doesn't exist, so let's create it. 
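            # mkpath also creates missing parent directories; a permission
            # failure surfaces as SharedFolderCreateFailed.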
@logger.debug("Host path doesn't exist, creating: #{hostpath}") begin hostpath.mkpath rescue Errno::EACCES raise Vagrant::Errors::SharedFolderCreateFailed, path: hostpath.to_s end end end def create_metadata @env[:ui].info I18n.t('vagrant.actions.vm.share_folders.creating') folders = [] shared_folders.each do |id, data| folders << { name: id, hostpath: File.expand_path(data[:hostpath], @env[:root_path]), transient: data[:transient] } end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/shutdown_domain.rb000066400000000000000000000037171414232526500255320ustar00rootroot00000000000000require 'log4r' module VagrantPlugins module ProviderLibvirt module Action # To wrap GracefulShutdown need to track the time taken class StartShutdownTimer def initialize(app, _env) @app = app end def call(env) env[:shutdown_start_time] = Time.now @app.call(env) end end end end end module VagrantPlugins module ProviderLibvirt module Action # Shutdown the domain. class ShutdownDomain def initialize(app, _env, target_state, source_state) @logger = Log4r::Logger.new('vagrant_libvirt::action::shutdown_domain') @target_state = target_state @source_state = source_state @app = app end def call(env) timeout = env[:machine].config.vm.graceful_halt_timeout start_time = env[:shutdown_start_time] if start_time.nil? # this really shouldn't happen raise Errors::CallChainError, require_action: StartShutdownTimer.name, current_action: ShutdownDomain.name end # return if successful, otherwise will ensure result is set to false env[:result] = env[:machine].state.id == @target_state return @app.call(env) if env[:result] current_time = Time.now # if we've already exceeded the timeout return @app.call(env) if current_time - start_time >= timeout # otherwise construct a new timeout. timeout = timeout - (current_time - start_time) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) if env[:machine].state.id == @source_state env[:ui].info(I18n.t('vagrant_libvirt.shutdown_domain')) domain.shutdown domain.wait_for(timeout) { !ready? } end env[:result] = env[:machine].state.id == @target_state @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/start_domain.rb000066400000000000000000000453321414232526500250130ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'rexml/document' module VagrantPlugins module ProviderLibvirt module Action # Just start the domain. class StartDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::start_domain') @app = app end def call(env) env[:ui].info(I18n.t('vagrant_libvirt.starting_domain')) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? config = env[:machine].provider_config begin # update domain settings on change. 
libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) # Libvirt API doesn't support modifying memory on NUMA enabled CPUs # http://libvirt.org/git/?p=libvirt.git;a=commit;h=d174394105cf00ed266bf729ddf461c21637c736 if config.numa_nodes == nil if config.memory.to_i * 1024 != libvirt_domain.max_memory libvirt_domain.max_memory = config.memory.to_i * 1024 libvirt_domain.memory = libvirt_domain.max_memory end end begin # XML definition manipulation descr = libvirt_domain.xml_desc(1) xml_descr = REXML::Document.new descr descr_changed = false # For outputting XML for comparison formatter = REXML::Formatters::Pretty.new # additional disk bus config.disks.each do |disk| device = disk[:device] bus = disk[:bus] REXML::XPath.each(xml_descr, '/domain/devices/disk[@device="disk"]/target[@dev="' + device + '"]') do |disk_target| next unless disk_target.attributes['bus'] != bus @logger.debug "disk #{device} bus updated from '#{disk_target.attributes['bus']}' to '#{bus}'" descr_changed = true disk_target.attributes['bus'] = bus disk_target.parent.delete_element("#{disk_target.parent.xpath}/address") end end # disk_bus REXML::XPath.each(xml_descr, '/domain/devices/disk[@device="disk"]/target[@dev="vda"]') do |disk_target| next unless disk_target.attributes['bus'] != config.disk_bus @logger.debug "domain disk bus updated from '#{disk_target.attributes['bus']}' to '#{config.disk_bus}'" descr_changed = true disk_target.attributes['bus'] = config.disk_bus disk_target.parent.delete_element("#{disk_target.parent.xpath}/address") end # Iterface type unless config.nic_model_type.nil? REXML::XPath.each(xml_descr, '/domain/devices/interface/model') do |iface_model| if iface_model.attributes['type'] != config.nic_model_type @logger.debug "network type updated from '#{iface_model.attributes['type']}' to '#{config.nic_model_type}'" descr_changed = true iface_model.attributes['type'] = config.nic_model_type end end end # vCpu count vcpus_count = libvirt_domain.num_vcpus(0) if config.cpus.to_i != vcpus_count @logger.debug "cpu count updated from '#{vcpus_count}' to '#{config.cpus}'" descr_changed = true REXML::XPath.first(xml_descr, '/domain/vcpu').text = config.cpus end # cpu_mode cpu = REXML::XPath.first(xml_descr, '/domain/cpu') if cpu.nil? @logger.debug "cpu_mode updated from not set to '#{config.cpu_mode}'" descr_changed = true cpu = REXML::Element.new('cpu', REXML::XPath.first(xml_descr, '/domain')) cpu.attributes['mode'] = config.cpu_mode else if cpu.attributes['mode'] != config.cpu_mode @logger.debug "cpu_mode updated from '#{cpu.attributes['mode']}' to '#{config.cpu_mode}'" descr_changed = true cpu.attributes['mode'] = config.cpu_mode end end if config.cpu_mode != 'host-passthrough' cpu_model = REXML::XPath.first(xml_descr, '/domain/cpu/model') if cpu_model.nil? 
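                # Only add a <model> element when a model was requested;
                # fallback='allow' lets Libvirt substitute the closest model
                # it can provide.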
if config.cpu_model.strip != '' @logger.debug "cpu_model updated from not set to '#{config.cpu_model}'" descr_changed = true cpu_model = REXML::Element.new('model', REXML::XPath.first(xml_descr, '/domain/cpu')) cpu_model.attributes['fallback'] = 'allow' cpu_model.text = config.cpu_model end else if (cpu_model.text or '').strip != config.cpu_model.strip @logger.debug "cpu_model text updated from #{cpu_model.text} to '#{config.cpu_model}'" descr_changed = true cpu_model.text = config.cpu_model end if cpu_model.attributes['fallback'] != config.cpu_fallback @logger.debug "cpu_model fallback attribute updated from #{cpu_model.attributes['fallback']} to '#{config.cpu_fallback}'" descr_changed = true cpu_model.attributes['fallback'] = config.cpu_fallback end end vmx_feature = REXML::XPath.first(xml_descr, '/domain/cpu/feature[@name="vmx"]') svm_feature = REXML::XPath.first(xml_descr, '/domain/cpu/feature[@name="svm"]') if config.nested if vmx_feature.nil? @logger.debug "nested mode enabled from unset by setting cpu vmx feature" descr_changed = true vmx_feature = REXML::Element.new('feature', REXML::XPath.first(xml_descr, '/domain/cpu')) vmx_feature.attributes['policy'] = 'optional' vmx_feature.attributes['name'] = 'vmx' end if svm_feature.nil? @logger.debug "nested mode enabled from unset by setting cpu svm feature" descr_changed = true svm_feature = REXML::Element.new('feature', REXML::XPath.first(xml_descr, '/domain/cpu')) svm_feature.attributes['policy'] = 'optional' svm_feature.attributes['name'] = 'svm' end else unless vmx_feature.nil? @logger.debug "nested mode disabled for cpu by removing vmx feature" descr_changed = true cpu.delete_element(vmx_feature) end unless svm_feature.nil? @logger.debug "nested mode disabled for cpu by removing svm feature" descr_changed = true cpu.delete_element(svm_feature) end end elsif config.numa_nodes == nil unless cpu.elements.to_a.empty? @logger.debug "switching cpu_mode to host-passthrough and removing emulated cpu features" descr_changed = true cpu.elements.each do |elem| cpu.delete_element(elem) end end end # Clock clock = REXML::XPath.first(xml_descr, '/domain/clock') if clock.attributes['offset'] != config.clock_offset @logger.debug "clock offset changed" descr_changed = true clock.attributes['offset'] = config.clock_offset end # clock timers - because timers can be added/removed, just rebuild and then compare if !config.clock_timers.empty? || clock.has_elements? oldclock = String.new formatter.write(REXML::XPath.first(xml_descr, '/domain/clock'), oldclock) clock.delete_element('//timer') config.clock_timers.each do |clock_timer| timer = REXML::Element.new('timer', clock) clock_timer.each do |attr, value| timer.attributes[attr.to_s] = value end end newclock = String.new formatter.write(clock, newclock) unless newclock.eql? oldclock @logger.debug "clock timers config changed" descr_changed = true end end # Graphics graphics = REXML::XPath.first(xml_descr, '/domain/devices/graphics') if config.graphics_type != 'none' if graphics.nil? 
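                # No <graphics> element yet; create one so the type, listen
                # address, port and keymap attributes below have somewhere to
                # be applied.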
descr_changed = true graphics = REXML::Element.new('graphics', REXML::XPath.first(xml_descr, '/domain/devices')) end if graphics.attributes['type'] != config.graphics_type descr_changed = true graphics.attributes['type'] = config.graphics_type end if graphics.attributes['listen'] != config.graphics_ip descr_changed = true graphics.attributes['listen'] = config.graphics_ip graphics.delete_element('//listen') end if graphics.attributes['autoport'] != config.graphics_autoport descr_changed = true graphics.attributes['autoport'] = config.graphics_autoport if config.graphics_autoport == 'no' graphics.attributes['port'] = config.graphics_port end end if graphics.attributes['keymap'] != config.keymap descr_changed = true graphics.attributes['keymap'] = config.keymap end if graphics.attributes['passwd'] != config.graphics_passwd descr_changed = true if config.graphics_passwd.nil? graphics.attributes.delete 'passwd' else graphics.attributes['passwd'] = config.graphics_passwd end end graphics_gl = REXML::XPath.first(xml_descr, '/domain/devices/graphics/gl') if graphics_gl.nil? if config.graphics_gl graphics_gl = REXML::Element.new('gl', REXML::XPath.first(xml_descr, '/domain/devices/graphics')) graphics_gl.attributes['enable'] = 'yes' descr_changed = true end else if config.graphics_gl if graphics_gl.attributes['enable'] != 'yes' graphics_gl.attributes['enable'] = 'yes' descr_changed = true end else graphics_gl.parent.delete_element(graphics_gl) descr_changed = true end end else # graphics_type = none, remove entire element graphics.parent.delete_element(graphics) unless graphics.nil? end # TPM if [config.tpm_path, config.tpm_version].any? if config.tpm_path raise Errors::FogCreateServerError, 'The TPM Path must be fully qualified' unless config.tpm_path[0].chr == '/' end # just build the tpm element every time # check at the end if it is different oldtpm = REXML::XPath.first(xml_descr, '/domain/devices/tpm') REXML::XPath.first(xml_descr, '/domain/devices').delete_element("tpm") newtpm = REXML::Element.new('tpm', REXML::XPath.first(xml_descr, '/domain/devices')) newtpm.attributes['model'] = config.tpm_model backend = newtpm.add_element('backend') backend.attributes['type'] = config.tpm_type case config.tpm_type when 'emulator' backend.attributes['version'] = config.tpm_version when 'passthrough' backend.add_element('device').attributes['path'] = config.tpm_path end unless "'#{newtpm}'".eql? "'#{oldtpm}'" @logger.debug "tpm config changed" descr_changed = true end end # Video device video = REXML::XPath.first(xml_descr, '/domain/devices/video') if !video.nil? && (config.graphics_type == 'none') # graphics_type = none, video devices are removed since there is no possible output @logger.debug "deleting video elements as config.graphics_type is none" descr_changed = true video.parent.delete_element(video) else video_model = REXML::XPath.first(xml_descr, '/domain/devices/video/model') if video_model.nil? 
@logger.debug "video updated from not set to type '#{config.video_type}' and vram '#{config.video_vram}'" descr_changed = true video_model = REXML::Element.new('model', REXML::XPath.first(xml_descr, '/domain/devices/video')) video_model.attributes['type'] = config.video_type video_model.attributes['vram'] = config.video_vram else if video_model.attributes['type'] != config.video_type || video_model.attributes['vram'] != config.video_vram.to_s @logger.debug "video type updated from '#{video_model.attributes['type']}' to '#{config.video_type}'" @logger.debug "video vram updated from '#{video_model.attributes['vram']}' to '#{config.video_vram}'" descr_changed = true video_model.attributes['type'] = config.video_type video_model.attributes['vram'] = config.video_vram end end video_accel = REXML::XPath.first(xml_descr, '/domain/devices/video/model/acceleration') if video_accel.nil? if config.video_accel3d video_accel = REXML::Element.new('acceleration', REXML::XPath.first(xml_descr, '/domain/devices/video/model')) video_accel.attributes['accel3d'] = 'yes' descr_changed = true end else if config.video_accel3d if video_accel.attributes['accel3d'] != 'yes' video_accel.attributes['accel3d'] = 'yes' descr_changed = true end else video_accel.parent.delete_element(video_accel) descr_changed = true end end end # Sound device if config.sound_type sound = REXML::XPath.first(xml_descr,'/domain/devices/sound/model') end # dtb if config.dtb dtb = REXML::XPath.first(xml_descr, '/domain/os/dtb') if dtb.nil? @logger.debug "dtb updated from not set to '#{config.dtb}'" descr_changed = true dtb = REXML::Element.new('dtb', REXML::XPath.first(xml_descr, '/domain/os')) dtb.text = config.dtb else if (dtb.text or '') != config.dtb @logger.debug "dtb updated from '#{dtb.text}' to '#{config.dtb}'" descr_changed = true dtb.text = config.dtb end end end # kernel and initrd if config.kernel kernel = REXML::XPath.first(xml_descr, '/domain/os/kernel') if kernel.nil? @logger.debug "kernel updated from not set to '#{config.kernel}'" descr_changed = true kernel = REXML::Element.new('kernel', REXML::XPath.first(xml_descr, '/domain/os')) kernel.text = config.kernel else if (kernel.text or '').strip != config.kernel @logger.debug "kernel updated from '#{kernel.text}' to '#{config.kernel}'" descr_changed = true kernel.text = config.kernel end end end if config.initrd initrd = REXML::XPath.first(xml_descr, '/domain/os/initrd') if initrd.nil? 
if config.initrd.strip != '' @logger.debug "initrd updated from not set to '#{config.initrd}'" descr_changed = true initrd = REXML::Element.new('initrd', REXML::XPath.first(xml_descr, '/domain/os')) initrd.text = config.initrd end else if (initrd.text or '').strip != config.initrd @logger.debug "initrd updated from '#{initrd.text}' to '#{config.initrd}'" descr_changed = true initrd.text = config.initrd end end end # Apply if descr_changed begin libvirt_domain.undefine new_descr = String.new xml_descr.write new_descr env[:machine].provider.driver.connection.servers.create(xml: new_descr) rescue Fog::Errors::Error => e env[:machine].provider.driver.connection.servers.create(xml: descr) raise Errors::FogCreateServerError, error_message: e.message end end rescue Errors::VagrantLibvirtError => e env[:ui].error("Error when updating domain settings: #{e.message}") end # Autostart with host if enabled in Vagrantfile libvirt_domain.autostart = config.autostart @logger.debug { "Starting Domain with XML:\n#{libvirt_domain.xml_desc}" } # Actually start the domain domain.start rescue Fog::Errors::Error, Errors::VagrantLibvirtError => e raise Errors::FogError, message: e.message end @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/suspend_domain.rb000066400000000000000000000024261414232526500253340ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' module VagrantPlugins module ProviderLibvirt module Action # Suspend domain. class SuspendDomain def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::suspend_domain') @app = app end # make pause def call(env) env[:ui].info(I18n.t('vagrant_libvirt.suspending_domain')) domain = env[:machine].provider.driver.connection.servers.get(env[:machine].id.to_s) raise Errors::NoDomainError if domain.nil? config = env[:machine].provider_config if config.suspend_mode == 'managedsave' libvirt_domain = env[:machine].provider.driver.connection.client.lookup_domain_by_uuid(env[:machine].id) begin libvirt_domain.managed_save rescue => e env[:ui].error("Error doing a managed save for domain. It may have entered a paused state. Check the output of `virsh managedsave DOMAIN_NAME --verbose` on the VM host, error: #{e.message}") end else domain.suspend end @logger.info("Machine #{env[:machine].id} is suspended ") @app.call(env) end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/action/wait_till_up.rb000066400000000000000000000053161414232526500250210ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'vagrant-libvirt/errors' require 'vagrant-libvirt/util/timer' require 'vagrant/util/retryable' module VagrantPlugins module ProviderLibvirt module Action # Wait till domain is started, till it obtains an IP address and is # accessible via ssh. class WaitTillUp include Vagrant::Util::Retryable def initialize(app, _env) @logger = Log4r::Logger.new('vagrant_libvirt::action::wait_till_up') @app = app end def call(env) # Initialize metrics if they haven't been env[:metrics] ||= {} # Get domain object domain = env[:machine].provider.driver.get_domain(env[:machine]) if domain.nil? raise Errors::NoDomainError, error_message: "Domain #{env[:machine].id} not found" end # Wait for domain to obtain an ip address. Ip address is searched # from arp table, either locally or remotely via ssh, if Libvirt # connection was done via ssh. 
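          # The lookup is retried on Fog timeouts (up to 300 attempts) and
          # returns early if interrupted so the warden can run recover().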
env[:ip_address] = nil @logger.debug("Searching for IP for MAC address: #{domain.mac}") env[:ui].info(I18n.t('vagrant_libvirt.waiting_for_ip')) env[:metrics]['instance_ip_time'] = Util::Timer.time do retryable(on: Fog::Errors::TimeoutError, tries: 300) do # just return if interrupted and let the warden call recover return if env[:interrupted] # Wait for domain to obtain an ip address env[:ip_address] = env[:machine].provider.driver.get_domain_ipaddress(env[:machine], domain) end end @logger.info("Got IP address #{env[:ip_address]}") @logger.info("Time for getting IP: #{env[:metrics]['instance_ip_time']}") @app.call(env) end def recover(env) # Undo the import terminate(env) end def terminate(env) if env[:machine].state.id != :not_created # If we're not supposed to destroy on error then just return return unless env[:destroy_on_error] if env[:halt_on_error] halt_env = env.dup halt_env.delete(:interrupted) halt_env[:config_validate] = false env[:action_runner].run(Action.action_halt, halt_env) else destroy_env = env.dup destroy_env.delete(:interrupted) destroy_env[:config_validate] = false destroy_env[:force_confirm_destroy] = true env[:action_runner].run(Action.action_destroy, destroy_env) end end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/cap/000077500000000000000000000000001414232526500212615ustar00rootroot00000000000000vagrant-libvirt-0.7.0/lib/vagrant-libvirt/cap/mount_9p.rb000066400000000000000000000030071414232526500233600ustar00rootroot00000000000000# frozen_string_literal: true require 'digest/md5' require 'vagrant/util/retryable' module VagrantPlugins module ProviderLibvirt module Cap class Mount9P extend Vagrant::Util::Retryable def self.mount_9p_shared_folder(machine, folders) folders.each do |_name, opts| # Expand the guest path so we can handle things like "~/vagrant" expanded_guest_path = machine.guest.capability( :shell_expand_guest_path, opts[:guestpath] ) # Do the actual creating and mounting machine.communicate.sudo("mkdir -p #{expanded_guest_path}") # Mount mount_tag = Digest::MD5.new.update(opts[:hostpath]).to_s[0, 31] mount_opts = '-o trans=virtio' mount_opts += ",access=#{opts[:owner]}" if opts[:owner] mount_opts += ",version=#{opts[:version]}" if opts[:version] mount_opts += ",#{opts[:mount_opts]}" if opts[:mount_opts] mount_command = "mount -t 9p #{mount_opts} '#{mount_tag}' #{expanded_guest_path}" retryable(on: Vagrant::Errors::LinuxMountFailed, tries: 5, sleep: 3) do machine.communicate.sudo('modprobe 9p') machine.communicate.sudo('modprobe 9pnet_virtio') machine.communicate.sudo(mount_command, error_class: Vagrant::Errors::LinuxMountFailed) end end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/cap/mount_virtiofs.rb000066400000000000000000000023541414232526500247010ustar00rootroot00000000000000# frozen_string_literal: true require 'digest/md5' require 'vagrant/util/retryable' module VagrantPlugins module ProviderLibvirt module Cap class MountVirtioFS extend Vagrant::Util::Retryable def self.mount_virtiofs_shared_folder(machine, folders) folders.each do |_name, opts| # Expand the guest path so we can handle things like "~/vagrant" expanded_guest_path = machine.guest.capability( :shell_expand_guest_path, opts[:guestpath] ) # Do the actual creating and mounting machine.communicate.sudo("mkdir -p #{expanded_guest_path}") # Mount mount_tag = Digest::MD5.new.update(opts[:hostpath]).to_s[0, 31] mount_opts = "-o #{opts[:mount_opts]}" if opts[:mount_opts] mount_command = "mount -t virtiofs #{mount_opts} '#{mount_tag}' 
#{expanded_guest_path}" retryable(on: Vagrant::Errors::LinuxMountFailed, tries: 5, sleep: 3) do machine.communicate.sudo(mount_command, error_class: Vagrant::Errors::LinuxMountFailed) end end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/cap/nic_mac_addresses.rb000066400000000000000000000010071414232526500252320ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Cap class NicMacAddresses def self.nic_mac_addresses(machine) # Vagrant expects a Hash with an index starting at 1 as key # and the mac as uppercase string without colons as value nic_macs = {} machine.provider.mac_addresses.each do |index, mac| nic_macs[index + 1] = mac.upcase.delete(':') end nic_macs end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/cap/public_address.rb000066400000000000000000000007051414232526500245730ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Cap class PublicAddress def self.public_address(machine) # This does not need to be a globally routable address, it # only needs to be accessible from the machine running # Vagrant. ssh_info = machine.ssh_info return nil if !ssh_info ssh_info[:host] end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/cap/synced_folder_9p.rb000066400000000000000000000104071414232526500250400ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'ostruct' require 'nokogiri' require 'digest/md5' require 'vagrant/util/subprocess' require 'vagrant/errors' require 'vagrant-libvirt/errors' module VagrantPlugins module SyncedFolder9P class SyncedFolder < Vagrant.plugin('2', :synced_folder) include Vagrant::Util include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate def initialize(*args) super @logger = Log4r::Logger.new('vagrant_libvirt::synced_folders::9p') end def usable?(machine, _raise_error = false) # bail now if not using Libvirt since checking version would throw error return false unless machine.provider_name == :libvirt # support in device attach/detach introduced in 1.2.2 # version number format is major * 1,000,000 + minor * 1,000 + release libvirt_version = machine.provider.driver.connection.client.libversion libvirt_version >= 1_002_002 end def prepare(machine, folders, _opts) raise Vagrant::Errors::Error('No Libvirt connection') if machine.provider.driver.connection.nil? @conn = machine.provider.driver.connection.client begin # loop through folders folders.each do |id, folder_opts| folder_opts.merge!(target: id, accessmode: 'passthrough', mount: true, readonly: nil) { |_k, ov, _nv| ov } mount_tag = Digest::MD5.new.update(folder_opts[:hostpath]).to_s[0, 31] folder_opts[:mount_tag] = mount_tag machine.ui.info "================\nMachine id: #{machine.id}\nShould be mounting folders\n #{id}, opts: #{folder_opts}" xml = Nokogiri::XML::Builder.new do |xml| xml.filesystem(type: 'mount', accessmode: folder_opts[:accessmode]) do xml.driver(type: 'path', wrpolicy: 'immediate') xml.source(dir: folder_opts[:hostpath]) xml.target(dir: mount_tag) xml.readonly unless folder_opts[:readonly].nil? 
end end.to_xml( save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION | Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS | Nokogiri::XML::Node::SaveOptions::FORMAT ) @logger.debug { "Attaching Synced Folder device with XML:\n#{xml}" } @conn.lookup_domain_by_uuid(machine.id).attach_device(xml, 0) end rescue => e machine.ui.error("could not attach device because: #{e}") raise VagrantPlugins::ProviderLibvirt::Errors::AttachDeviceError, error_message: e.message end end # once up, mount folders def enable(machine, folders, _opts) # Go through each folder and mount machine.ui.info('mounting 9p share in guest') # Only mount folders that have a guest path specified. mount_folders = {} folders.each do |id, opts| next unless opts[:mount] && opts[:guestpath] && !opts[:guestpath].empty? mount_folders[id] = opts.dup # merge common options if not given mount_folders[id].merge!(version: '9p2000.L') { |_k, ov, _nv| ov } end # Mount the actual folder machine.guest.capability( :mount_9p_shared_folder, mount_folders ) end def cleanup(machine, _opts) if machine.provider.driver.connection.nil? raise Vagrant::Errors::Error('No Libvirt connection') end @conn = machine.provider.driver.connection.client begin if machine.id && machine.id != '' dom = @conn.lookup_domain_by_uuid(machine.id) Nokogiri::XML(dom.xml_desc).xpath( '/domain/devices/filesystem' ).each do |xml| dom.detach_device(xml.to_s) machine.ui.info 'Cleaned up shared folders' end end rescue => e machine.ui.error("could not detach device because: #{e}") raise VagrantPlugins::ProviderLibvirt::Errors::DetachDeviceError, error_message: e.message end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/cap/synced_folder_virtiofs.rb000066400000000000000000000100621414232526500263520ustar00rootroot00000000000000# frozen_string_literal: true require 'log4r' require 'ostruct' require 'nokogiri' require 'digest/md5' require 'vagrant/util/subprocess' require 'vagrant/errors' require 'vagrant-libvirt/errors' module VagrantPlugins module SyncedFolderVirtioFS class SyncedFolder < Vagrant.plugin('2', :synced_folder) include Vagrant::Util include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate def initialize(*args) super @logger = Log4r::Logger.new('vagrant_libvirt::synced_folders::virtiofs') end def usable?(machine, _raise_error = false) # bail now if not using Libvirt since checking version would throw error return false unless machine.provider_name == :libvirt # virtiofs support introduced since 6.2.0 # version number format is major * 1,000,000 + minor * 1,000 + release libvirt_version = machine.provider.driver.connection.client.libversion libvirt_version >= 6_002_000 end def prepare(machine, folders, _opts) raise Vagrant::Errors::Error('No Libvirt connection') if machine.provider.driver.connection.nil? @conn = machine.provider.driver.connection.client begin # loop through folders folders.each do |id, folder_opts| folder_opts.merge!(target: id, mount: true, readonly: nil) { |_k, ov, _nv| ov } mount_tag = Digest::MD5.new.update(folder_opts[:hostpath]).to_s[0, 31] folder_opts[:mount_tag] = mount_tag machine.ui.info "================\nMachine id: #{machine.id}\nShould be mounting folders\n #{id}, opts: #{folder_opts}" xml = Nokogiri::XML::Builder.new do |xml| xml.filesystem(type: 'mount', accessmode: 'passthrough') do xml.driver(type: 'virtiofs') xml.source(dir: folder_opts[:hostpath]) xml.target(dir: mount_tag) xml.readonly unless folder_opts[:readonly].nil? 
end end.to_xml( save_with: Nokogiri::XML::Node::SaveOptions::NO_DECLARATION | Nokogiri::XML::Node::SaveOptions::NO_EMPTY_TAGS | Nokogiri::XML::Node::SaveOptions::FORMAT ) @logger.debug { "Attaching Synced Folder device with XML:\n#{xml}" } @conn.lookup_domain_by_uuid(machine.id).attach_device(xml, 0) end rescue => e machine.ui.error("could not attach device because: #{e}") raise VagrantPlugins::ProviderLibvirt::Errors::AttachDeviceError, error_message: e.message end end # once up, mount folders def enable(machine, folders, _opts) # Go through each folder and mount machine.ui.info('mounting virtiofs share in guest') # Only mount folders that have a guest path specified. mount_folders = {} folders.each do |id, opts| next unless opts[:mount] && opts[:guestpath] && !opts[:guestpath].empty? mount_folders[id] = opts.dup end # Mount the actual folder machine.guest.capability( :mount_virtiofs_shared_folder, mount_folders ) end def cleanup(machine, _opts) if machine.provider.driver.connection.nil? raise Vagrant::Errors::Error('No Libvirt connection') end @conn = machine.provider.driver.connection.client begin if machine.id && machine.id != '' dom = @conn.lookup_domain_by_uuid(machine.id) Nokogiri::XML(dom.xml_desc).xpath( '/domain/devices/filesystem' ).each do |xml| dom.detach_device(xml.to_s) machine.ui.info 'Cleaned up shared folders' end end rescue => e machine.ui.error("could not detach device because: #{e}") raise VagrantPlugins::ProviderLibvirt::Errors::DetachDeviceError, error_message: e.message end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/config.rb000066400000000000000000001146271414232526500223230ustar00rootroot00000000000000# frozen_string_literal: true require 'cgi' require 'vagrant' class Numeric Alphabet = ('a'..'z').to_a def vdev s = String.new q = self (q, r = (q - 1).divmod(26)) && s.prepend(Alphabet[r]) until q.zero? "vd#{s}" end end module VagrantPlugins module ProviderLibvirt class Config < Vagrant.plugin('2', :config) # manually specify URI # will supercede most other options if provided attr_accessor :uri # A hypervisor name to access via Libvirt. attr_accessor :driver # The name of the server, where Libvirtd is running. attr_accessor :host # If use ssh tunnel to connect to Libvirt. attr_accessor :connect_via_ssh # Path towards the Libvirt socket attr_accessor :socket # The username to access Libvirt. attr_accessor :username # Password for Libvirt connection. attr_accessor :password # ID SSH key file attr_accessor :id_ssh_key_file attr_accessor :proxy_command # Forward port with id 'ssh' attr_accessor :forward_ssh_port # Libvirt storage pool name, where box image and instance snapshots will # be stored. 
attr_accessor :storage_pool_name attr_accessor :storage_pool_path # Libvirt storage pool where the base image snapshot shall be stored attr_accessor :snapshot_pool_name # Turn on to prevent hostname conflicts attr_accessor :random_hostname # Libvirt default network attr_accessor :management_network_device attr_accessor :management_network_name attr_accessor :management_network_address attr_accessor :management_network_mode attr_accessor :management_network_mac attr_accessor :management_network_guest_ipv6 attr_accessor :management_network_autostart attr_accessor :management_network_pci_bus attr_accessor :management_network_pci_slot attr_accessor :management_network_domain attr_accessor :management_network_mtu # System connection information attr_accessor :system_uri # Default host prefix (alternative to use project folder name) attr_accessor :default_prefix # Domain specific settings used while creating new domain. attr_accessor :title attr_accessor :description attr_accessor :uuid attr_accessor :memory attr_accessor :nodeset attr_accessor :memory_backing attr_accessor :channel attr_accessor :cpus attr_accessor :cpuset attr_accessor :cpu_mode attr_accessor :cpu_model attr_accessor :cpu_fallback attr_accessor :cpu_features attr_accessor :cpu_topology attr_accessor :shares attr_accessor :features attr_accessor :features_hyperv attr_accessor :clock_offset attr_accessor :clock_timers attr_accessor :numa_nodes attr_accessor :loader attr_accessor :nvram attr_accessor :boot_order attr_accessor :machine_type attr_accessor :machine_arch attr_accessor :machine_virtual_size attr_accessor :disk_bus attr_accessor :disk_device attr_accessor :disk_driver_opts attr_accessor :nic_model_type attr_accessor :nested attr_accessor :volume_cache # deprecated, kept for backwards compatibility; use disk_driver attr_accessor :kernel attr_accessor :cmd_line attr_accessor :initrd attr_accessor :dtb attr_accessor :emulator_path attr_accessor :graphics_type attr_accessor :graphics_autoport attr_accessor :graphics_port attr_accessor :graphics_passwd attr_accessor :graphics_ip attr_accessor :graphics_gl attr_accessor :video_type attr_accessor :video_vram attr_accessor :video_accel3d attr_accessor :keymap attr_accessor :kvm_hidden attr_accessor :sound_type # Sets the information for connecting to a host TPM device # Only supports socket-based TPMs attr_accessor :tpm_model attr_accessor :tpm_type attr_accessor :tpm_path attr_accessor :tpm_version # Configure the memballoon attr_accessor :memballoon_enabled attr_accessor :memballoon_model attr_accessor :memballoon_pci_bus attr_accessor :memballoon_pci_slot # Sets the max number of NICs that can be created # Default set to 8. 
Don't change the default unless you know # what are doing attr_accessor :nic_adapter_count # Storage attr_accessor :disks attr_accessor :cdroms # Inputs attr_accessor :inputs # Channels attr_accessor :channels # PCI device passthrough attr_accessor :pcis # Random number device passthrough attr_accessor :rng # Watchdog device attr_accessor :watchdog_dev # USB controller attr_accessor :usbctl_dev # USB device passthrough attr_accessor :usbs # Redirected devices attr_accessor :redirdevs attr_accessor :redirfilters # smartcard device attr_accessor :smartcard_dev # Suspend mode attr_accessor :suspend_mode # Autostart attr_accessor :autostart # Attach mgmt network attr_accessor :mgmt_attach # Additional qemuargs arguments attr_accessor :qemu_args # Additional qemuenv arguments attr_accessor :qemu_env # Use QEMU session instead of system attr_accessor :qemu_use_session # Use QEMU Agent to get ip address attr_accessor :qemu_use_agent # serial consoles attr_accessor :serials def initialize @uri = UNSET_VALUE @driver = UNSET_VALUE @host = UNSET_VALUE @port = UNSET_VALUE @connect_via_ssh = UNSET_VALUE @username = UNSET_VALUE @password = UNSET_VALUE @id_ssh_key_file = UNSET_VALUE @socket = UNSET_VALUE @proxy_command = UNSET_VALUE @forward_ssh_port = UNSET_VALUE # forward port with id 'ssh' @storage_pool_name = UNSET_VALUE @snapshot_pool_name = UNSET_VALUE @random_hostname = UNSET_VALUE @management_network_device = UNSET_VALUE @management_network_name = UNSET_VALUE @management_network_address = UNSET_VALUE @management_network_mode = UNSET_VALUE @management_network_mac = UNSET_VALUE @management_network_guest_ipv6 = UNSET_VALUE @management_network_autostart = UNSET_VALUE @management_network_pci_slot = UNSET_VALUE @management_network_pci_bus = UNSET_VALUE @management_network_domain = UNSET_VALUE @management_network_mtu = UNSET_VALUE # System connection information @system_uri = UNSET_VALUE # Domain specific settings. 
@title = UNSET_VALUE @description = UNSET_VALUE @uuid = UNSET_VALUE @memory = UNSET_VALUE @nodeset = UNSET_VALUE @memory_backing = UNSET_VALUE @cpus = UNSET_VALUE @cpuset = UNSET_VALUE @cpu_mode = UNSET_VALUE @cpu_model = UNSET_VALUE @cpu_fallback = UNSET_VALUE @cpu_features = UNSET_VALUE @cpu_topology = UNSET_VALUE @shares = UNSET_VALUE @features = UNSET_VALUE @features_hyperv = UNSET_VALUE @clock_offset = UNSET_VALUE @clock_timers = [] @numa_nodes = UNSET_VALUE @loader = UNSET_VALUE @nvram = UNSET_VALUE @machine_type = UNSET_VALUE @machine_arch = UNSET_VALUE @machine_virtual_size = UNSET_VALUE @disk_bus = UNSET_VALUE @disk_device = UNSET_VALUE @disk_driver_opts = {} @nic_model_type = UNSET_VALUE @nested = UNSET_VALUE @volume_cache = UNSET_VALUE @kernel = UNSET_VALUE @initrd = UNSET_VALUE @dtb = UNSET_VALUE @cmd_line = UNSET_VALUE @emulator_path = UNSET_VALUE @graphics_type = UNSET_VALUE @graphics_autoport = UNSET_VALUE @graphics_port = UNSET_VALUE @graphics_ip = UNSET_VALUE @graphics_passwd = UNSET_VALUE @graphics_gl = UNSET_VALUE @video_type = UNSET_VALUE @video_vram = UNSET_VALUE @video_accel3d = UNSET_VALUE @sound_type = UNSET_VALUE @keymap = UNSET_VALUE @kvm_hidden = UNSET_VALUE @tpm_model = UNSET_VALUE @tpm_type = UNSET_VALUE @tpm_path = UNSET_VALUE @tpm_version = UNSET_VALUE @memballoon_enabled = UNSET_VALUE @memballoon_model = UNSET_VALUE @memballoon_pci_bus = UNSET_VALUE @memballoon_pci_slot = UNSET_VALUE @nic_adapter_count = UNSET_VALUE # Boot order @boot_order = [] # Storage @disks = [] @cdroms = [] # Inputs @inputs = UNSET_VALUE # Channels @channels = UNSET_VALUE # PCI device passthrough @pcis = UNSET_VALUE # Random number device passthrough @rng = UNSET_VALUE # Watchdog device @watchdog_dev = UNSET_VALUE # USB controller @usbctl_dev = UNSET_VALUE # USB device passthrough @usbs = UNSET_VALUE # Redirected devices @redirdevs = UNSET_VALUE @redirfilters = UNSET_VALUE # smartcard device @smartcard_dev = UNSET_VALUE # Suspend mode @suspend_mode = UNSET_VALUE # Autostart @autostart = UNSET_VALUE # Attach mgmt network @mgmt_attach = UNSET_VALUE # Additional QEMU commandline arguments @qemu_args = UNSET_VALUE # Additional QEMU commandline environment variables @qemu_env = UNSET_VALUE @qemu_use_session = UNSET_VALUE # Use Qemu agent to get ip address @qemu_use_agent = UNSET_VALUE @serials = [] end def boot(device) @boot_order << device # append end def _get_device(disks) # skip existing devices and also the first one (vda) exist = disks.collect { |x| x[:device] } + [1.vdev.to_s] skip = 1 # we're 1 based, not 0 based... loop do dev = skip.vdev # get lettered device return dev unless exist.include?(dev) skip += 1 end end def _get_cdrom_dev(cdroms) exist = Hash[cdroms.collect { |x| [x[:dev], true] }] # hda - hdc curr = 'a'.ord while curr <= 'd'.ord dev = "hd#{curr.chr}" if exist[dev] curr += 1 next else return dev end end # is it better to raise our own error, or let Libvirt cause the exception? 
raise 'Only four cdroms may be attached at a time' end def _generate_numa @numa_nodes.collect { |x| # Perform some validation of cpu values unless x[:cpus] =~ /^\d+-\d+$/ raise 'numa_nodes[:cpus] must be in format "integer-integer"' end # Convert to KiB x[:memory] = x[:memory].to_i * 1024 } # Grab the value of the last @numa_nodes[:cpus] and verify @cpus matches # Note: [:cpus] is zero based and @cpus is not, so we need to +1 last_cpu = @numa_nodes.last[:cpus] last_cpu = last_cpu.scan(/\d+$/)[0] last_cpu = last_cpu.to_i + 1 if @cpus != last_cpu.to_i raise 'The total number of numa_nodes[:cpus] must equal config.cpus' end @numa_nodes end def cpu_feature(options = {}) if options[:name].nil? || options[:policy].nil? raise 'CPU Feature name AND policy must be specified' end @cpu_features = [] if @cpu_features == UNSET_VALUE @cpu_features.push(name: options[:name], policy: options[:policy]) end def hyperv_feature(options = {}) if options[:name].nil? || options[:state].nil? raise 'Feature name AND state must be specified' end if options[:name] == 'spinlocks' && options[:retries].nil? raise 'Feature spinlocks requires retries parameter' end @features_hyperv = [] if @features_hyperv == UNSET_VALUE if options[:name] == 'spinlocks' @features_hyperv.push(name: options[:name], state: options[:state], retries: options[:retries]) else @features_hyperv.push(name: options[:name], state: options[:state]) end end def clock_timer(options = {}) if options[:name].nil? raise 'Clock timer name must be specified' end options.each do |key, value| case key when :name, :track, :tickpolicy, :frequency, :mode, :present if value.nil? raise "Value of timer option #{key} is nil" end else raise "Unknown clock timer option: #{key}" end end @clock_timers.push(options.dup) end def cputopology(options = {}) if options[:sockets].nil? || options[:cores].nil? || options[:threads].nil? raise 'CPU topology must have all of sockets, cores and threads specified' end if @cpu_topology == UNSET_VALUE @cpu_topology = {} end @cpu_topology[:sockets] = options[:sockets] @cpu_topology[:cores] = options[:cores] @cpu_topology[:threads] = options[:threads] end def memorybacking(option, config = {}) case option when :source raise 'Source type must be specified' if config[:type].nil? when :access raise 'Access mode must be specified' if config[:mode].nil? when :allocation raise 'Allocation mode must be specified' if config[:mode].nil? end @memory_backing = [] if @memory_backing == UNSET_VALUE @memory_backing.push(name: option, config: config) end def input(options = {}) if options[:type].nil? || options[:bus].nil? raise 'Input type AND bus must be specified' end @inputs = [] if @inputs == UNSET_VALUE @inputs.push(type: options[:type], bus: options[:bus]) end def channel(options = {}) if options[:type].nil? raise 'Channel type must be specified.' elsif options[:type] == 'unix' && options[:target_type] == 'guestfwd' # Guest forwarding requires a target (ip address) and a port if options[:target_address].nil? || options[:target_port].nil? || options[:source_path].nil? raise 'guestfwd requires target_address, target_port and source_path' end end @channels = [] if @channels == UNSET_VALUE @channels.push(type: options[:type], source_mode: options[:source_mode], source_path: options[:source_path], target_address: options[:target_address], target_name: options[:target_name], target_port: options[:target_port], target_type: options[:target_type]) end def random(options = {}) if !options[:model].nil? 
&& options[:model] != 'random' raise 'The only supported rng backend is "random".' end @rng = {} if @rng == UNSET_VALUE @rng[:model] = options[:model] end def pci(options = {}) if options[:bus].nil? || options[:slot].nil? || options[:function].nil? raise 'Bus AND slot AND function must be specified. Check `lspci` for that numbers.' end @pcis = [] if @pcis == UNSET_VALUE if options[:domain].nil? pci_domain = '0x0000' else pci_domain = options[:domain] end @pcis.push(domain: pci_domain, bus: options[:bus], slot: options[:slot], function: options[:function]) end def watchdog(options = {}) if options[:model].nil? raise 'Model must be specified.' end if @watchdog_dev == UNSET_VALUE @watchdog_dev = {} end @watchdog_dev[:model] = options[:model] @watchdog_dev[:action] = options[:action] || 'reset' end def usb_controller(options = {}) if options[:model].nil? raise 'USB controller model must be specified.' end if @usbctl_dev == UNSET_VALUE @usbctl_dev = {} end @usbctl_dev[:model] = options[:model] @usbctl_dev[:ports] = options[:ports] if options[:ports] end def usb(options = {}) if (options[:bus].nil? || options[:device].nil?) && options[:vendor].nil? && options[:product].nil? raise 'Bus and device and/or vendor and/or product must be specified. Check `lsusb` for these.' end @usbs = [] if @usbs == UNSET_VALUE @usbs.push(bus: options[:bus], device: options[:device], vendor: options[:vendor], product: options[:product], startupPolicy: options[:startupPolicy]) end def redirdev(options = {}) raise 'Type must be specified.' if options[:type].nil? @redirdevs = [] if @redirdevs == UNSET_VALUE @redirdevs.push(type: options[:type]) end def redirfilter(options = {}) raise 'Option allow must be specified.' if options[:allow].nil? @redirfilters = [] if @redirfilters == UNSET_VALUE @redirfilters.push(class: options[:class] || -1, vendor: options[:vendor] || -1, product: options[:product] || -1, version: options[:version] || -1, allow: options[:allow]) end def smartcard(options = {}) if options[:mode].nil? raise 'Option mode must be specified.' elsif options[:mode] != 'passthrough' raise 'Currently only passthrough mode is supported!' elsif options[:type] == 'tcp' && (options[:source_mode].nil? || options[:source_host].nil? || options[:source_service].nil?) raise 'If using type "tcp", option "source_mode", "source_host" and "source_service" must be specified.' end if @smartcard_dev == UNSET_VALUE @smartcard_dev = {} end @smartcard_dev[:mode] = options[:mode] @smartcard_dev[:type] = options[:type] || 'spicevmc' @smartcard_dev[:source_mode] = options[:source_mode] if @smartcard_dev[:type] == 'tcp' @smartcard_dev[:source_host] = options[:source_host] if @smartcard_dev[:type] == 'tcp' @smartcard_dev[:source_service] = options[:source_service] if @smartcard_dev[:type] == 'tcp' end # Disk driver options for primary disk def disk_driver(options = {}) supported_opts = [:cache, :io, :copy_on_read, :discard, :detect_zeroes] @disk_driver_opts = options.select { |k,_| supported_opts.include? k } end # NOTE: this will run twice for each time it's needed- keep it idempotent def storage(storage_type, options = {}) if storage_type == :file if options[:device] == :cdrom _handle_cdrom_storage(options) else _handle_disk_storage(options) end end end def _handle_cdrom_storage(options = {}) # # # # #
# # # note the target dev will need to be changed with each cdrom drive (hdc, hdd, etc), # as will the address unit number (unit=0, unit=1, etc) options = { type: 'raw', bus: 'ide', path: nil }.merge(options) cdrom = { type: options[:type], dev: options[:dev], bus: options[:bus], path: options[:path] } @cdroms << cdrom end def _handle_disk_storage(options = {}) options = { type: 'qcow2', size: '10G', # matches the fog default path: nil, bus: 'virtio' }.merge(options) disk = { device: options[:device], type: options[:type], size: options[:size], path: options[:path], bus: options[:bus], cache: options[:cache] || 'default', allow_existing: options[:allow_existing], shareable: options[:shareable], serial: options[:serial], io: options[:io], copy_on_read: options[:copy_on_read], discard: options[:discard], detect_zeroes: options[:detect_zeroes], pool: options[:pool], # overrides storage_pool setting for additional disks wwn: options[:wwn], } @disks << disk # append end def qemuargs(options = {}) @qemu_args = [] if @qemu_args == UNSET_VALUE @qemu_args << options if options[:value] end def qemuenv(options = {}) @qemu_env = {} if @qemu_env == UNSET_VALUE @qemu_env.merge!(options) end def serial(options={}) options = { :type => "pty", :source => nil, }.merge(options) serial = { :type => options[:type], :source => options[:source], } @serials << serial end def _default_uri # Determine if any settings except driver provided explicitly, if not # and the LIBVIRT_DEFAULT_URI var is set, use that. # # Skipping driver because that may be set on individual boxes rather # than by the user. if [ @connect_via_ssh, @host, @username, @password, @id_ssh_key_file, @qemu_use_session, @socket, ].none?{ |v| v != UNSET_VALUE } if ENV.fetch('LIBVIRT_DEFAULT_URI', '') != "" @uri = ENV['LIBVIRT_DEFAULT_URI'] end end end # code to generate URI from from either the LIBVIRT_URI environment # variable or a config moved out of the connect action def _generate_uri(qemu_use_session) # builds the Libvirt connection URI from the given driver config # Setup connection uri. uri = @driver.dup virt_path = case uri when 'qemu', 'kvm' qemu_use_session ? '/session' : '/system' when 'openvz', 'uml', 'phyp', 'parallels' '/system' when '@en', 'esx' '/' when 'vbox', 'vmwarews', 'hyperv' '/session' else raise "Require specify driver #{uri}" end if uri == 'kvm' uri = 'qemu' # use QEMU uri for KVM domain type end # turn on ssh if an ssh key file is explicitly provided if @connect_via_ssh == UNSET_VALUE && @id_ssh_key_file && @id_ssh_key_file != UNSET_VALUE @connect_via_ssh = true end params = {} if @connect_via_ssh == true finalize_id_ssh_key_file uri += '+ssh://' uri += "#{@username}@" if @username && @username != UNSET_VALUE uri += (@host && @host != UNSET_VALUE ? @host : 'localhost') params['no_verify'] = '1' params['keyfile'] = @id_ssh_key_file if @id_ssh_key_file else uri += '://' uri += @host if @host && @host != UNSET_VALUE end uri += virt_path # set path to Libvirt socket params['socket'] = @socket if @socket uri += '?' + params.map { |pair| pair.join('=') }.join('&') unless params.empty? uri end def _parse_uri(uri) begin URI.parse(uri) rescue raise "@uri set to invalid uri '#{uri}'" end end def finalize! _default_uri if @uri == UNSET_VALUE # settings which _generate_uri @driver = 'kvm' if @driver == UNSET_VALUE @password = nil if @password == UNSET_VALUE @socket = nil if @socket == UNSET_VALUE # If uri isn't set then let's build one from various sources. # Default to passing false for qemu_use_session if it's not set. 
if @uri == UNSET_VALUE @uri = _generate_uri(@qemu_use_session == UNSET_VALUE ? false : @qemu_use_session) end finalize_from_uri finalize_proxy_command # forward port with id 'ssh' @forward_ssh_port = false if @forward_ssh_port == UNSET_VALUE @storage_pool_name = 'default' if @storage_pool_name == UNSET_VALUE @snapshot_pool_name = @storage_pool_name if @snapshot_pool_name == UNSET_VALUE @storage_pool_path = nil if @storage_pool_path == UNSET_VALUE @random_hostname = false if @random_hostname == UNSET_VALUE @management_network_device = 'virbr0' if @management_network_device == UNSET_VALUE @management_network_name = 'vagrant-libvirt' if @management_network_name == UNSET_VALUE @management_network_address = '192.168.121.0/24' if @management_network_address == UNSET_VALUE @management_network_mode = 'nat' if @management_network_mode == UNSET_VALUE @management_network_mac = nil if @management_network_mac == UNSET_VALUE @management_network_guest_ipv6 = 'yes' if @management_network_guest_ipv6 == UNSET_VALUE @management_network_autostart = false if @management_network_autostart == UNSET_VALUE @management_network_pci_bus = nil if @management_network_pci_bus == UNSET_VALUE @management_network_pci_slot = nil if @management_network_pci_slot == UNSET_VALUE @management_network_domain = nil if @management_network_domain == UNSET_VALUE @management_network_mtu = nil if @management_network_mtu == UNSET_VALUE # Domain specific settings. @title = '' if @title == UNSET_VALUE @description = '' if @description == UNSET_VALUE @uuid = '' if @uuid == UNSET_VALUE @memory = 512 if @memory == UNSET_VALUE @nodeset = nil if @nodeset == UNSET_VALUE @memory_backing = [] if @memory_backing == UNSET_VALUE @cpus = 1 if @cpus == UNSET_VALUE @cpuset = nil if @cpuset == UNSET_VALUE @cpu_mode = 'host-model' if @cpu_mode == UNSET_VALUE @cpu_model = if (@cpu_model == UNSET_VALUE) && (@cpu_mode == 'custom') 'qemu64' elsif @cpu_mode != 'custom' '' else @cpu_model end @cpu_topology = {} if @cpu_topology == UNSET_VALUE @cpu_fallback = 'allow' if @cpu_fallback == UNSET_VALUE @cpu_features = [] if @cpu_features == UNSET_VALUE @shares = nil if @shares == UNSET_VALUE @features = ['acpi','apic','pae'] if @features == UNSET_VALUE @features_hyperv = [] if @features_hyperv == UNSET_VALUE @clock_offset = 'utc' if @clock_offset == UNSET_VALUE @clock_timers = [] if @clock_timers == UNSET_VALUE @numa_nodes = @numa_nodes == UNSET_VALUE ? 
nil : _generate_numa @loader = nil if @loader == UNSET_VALUE @nvram = nil if @nvram == UNSET_VALUE @machine_type = nil if @machine_type == UNSET_VALUE @machine_arch = nil if @machine_arch == UNSET_VALUE @machine_virtual_size = nil if @machine_virtual_size == UNSET_VALUE @disk_bus = 'virtio' if @disk_bus == UNSET_VALUE @disk_device = 'vda' if @disk_device == UNSET_VALUE @disk_driver_opts = {} if @disk_driver_opts == UNSET_VALUE @nic_model_type = nil if @nic_model_type == UNSET_VALUE @nested = false if @nested == UNSET_VALUE @volume_cache = nil if @volume_cache == UNSET_VALUE @kernel = nil if @kernel == UNSET_VALUE @cmd_line = '' if @cmd_line == UNSET_VALUE @initrd = '' if @initrd == UNSET_VALUE @dtb = nil if @dtb == UNSET_VALUE @graphics_type = 'vnc' if @graphics_type == UNSET_VALUE @graphics_autoport = 'yes' if @graphics_port == UNSET_VALUE @graphics_autoport = 'no' if @graphics_port != UNSET_VALUE if (@graphics_type != 'vnc' && @graphics_type != 'spice') || @graphics_passwd == UNSET_VALUE @graphics_passwd = nil end @graphics_port = -1 if @graphics_port == UNSET_VALUE @graphics_ip = '127.0.0.1' if @graphics_ip == UNSET_VALUE @video_type = 'cirrus' if @video_type == UNSET_VALUE @video_vram = 9216 if @video_vram == UNSET_VALUE @video_accel3d = false if @video_accel3d == UNSET_VALUE @graphics_gl = @video_accel3d if @graphics_gl == UNSET_VALUE @sound_type = nil if @sound_type == UNSET_VALUE @keymap = 'en-us' if @keymap == UNSET_VALUE @kvm_hidden = false if @kvm_hidden == UNSET_VALUE @tpm_model = 'tpm-tis' if @tpm_model == UNSET_VALUE @tpm_type = 'passthrough' if @tpm_type == UNSET_VALUE @tpm_path = nil if @tpm_path == UNSET_VALUE @tpm_version = nil if @tpm_version == UNSET_VALUE @memballoon_enabled = nil if @memballoon_enabled == UNSET_VALUE @memballoon_model = 'virtio' if @memballoon_model == UNSET_VALUE @memballoon_pci_bus = '0x00' if @memballoon_pci_bus == UNSET_VALUE @memballoon_pci_slot = '0x0f' if @memballoon_pci_slot == UNSET_VALUE @nic_adapter_count = 8 if @nic_adapter_count == UNSET_VALUE @emulator_path = nil if @emulator_path == UNSET_VALUE # Boot order @boot_order = [] if @boot_order == UNSET_VALUE # Storage @disks = [] if @disks == UNSET_VALUE @disks.map! do |disk| disk[:device] = _get_device(@disks) if disk[:device].nil? disk end @cdroms = [] if @cdroms == UNSET_VALUE @cdroms.map! do |cdrom| cdrom[:dev] = _get_cdrom_dev(@cdroms) if cdrom[:dev].nil? cdrom end # Inputs @inputs = [{ type: 'mouse', bus: 'ps2' }] if @inputs == UNSET_VALUE # Channels @channels = [] if @channels == UNSET_VALUE # PCI device passthrough @pcis = [] if @pcis == UNSET_VALUE # Random number generator passthrough @rng = {} if @rng == UNSET_VALUE # Watchdog device @watchdog_dev = {} if @watchdog_dev == UNSET_VALUE # USB device passthrough @usbs = [] if @usbs == UNSET_VALUE # Redirected devices @redirdevs = [] if @redirdevs == UNSET_VALUE @redirfilters = [] if @redirfilters == UNSET_VALUE # USB controller if @usbctl_dev == UNSET_VALUE @usbctl_dev = if !@usbs.empty? or !@redirdevs.empty? 
then {:model => 'qemu-xhci'} else {} end end # smartcard device @smartcard_dev = {} if @smartcard_dev == UNSET_VALUE # Suspend mode @suspend_mode = 'pause' if @suspend_mode == UNSET_VALUE # Autostart @autostart = false if @autostart == UNSET_VALUE # Attach mgmt network @mgmt_attach = true if @mgmt_attach == UNSET_VALUE # Additional QEMU commandline arguments @qemu_args = [] if @qemu_args == UNSET_VALUE # Additional QEMU commandline environment variables @qemu_env = {} if @qemu_env == UNSET_VALUE @qemu_use_agent = false if @qemu_use_agent == UNSET_VALUE @serials = [{:type => 'pty', :source => nil}] if @serials == [] end def validate(machine) errors = _detected_errors # The @uri and @qemu_use_session should not conflict uri = _parse_uri(@uri) if (uri.scheme.start_with? "qemu") && (uri.path.include? "session") if @qemu_use_session != true errors << "the URI and qemu_use_session configuration conflict: uri:'#{@uri}' qemu_use_session:'#{@qemu_use_session}'" end end unless @qemu_use_agent == true || @qemu_use_agent == false errors << "libvirt.qemu_use_agent must be a boolean." end if @qemu_use_agent == true # if qemu agent is used to optain domain ip configuration, at least # one qemu channel has to be configured. As there are various options, # error out and leave configuration to the user unless machine.provider_config.channels.any? { |channel| channel[:target_name].start_with?("org.qemu.guest_agent") } errors << "qemu agent option enabled, but no qemu agent channel configured: please add at least one qemu agent channel to vagrant config" end end machine.provider_config.disks.each do |disk| if disk[:path] && (disk[:path][0] == '/') errors << "absolute volume paths like '#{disk[:path]}' not yet supported" end end machine.provider_config.serials.each do |serial| if serial[:source] and serial[:source][:path].nil? errors << "serial :source requires :path to be defined" end end machine.config.vm.networks.each do |_type, opts| if opts[:mac] if opts[:mac] =~ /\A([0-9a-fA-F]{12})\z/ opts[:mac] = opts[:mac].scan(/../).join(':') end unless opts[:mac] =~ /\A([0-9a-fA-F]{2}:){5}([0-9a-fA-F]{2})\z/ errors << "Configured NIC MAC '#{opts[:mac]}' is not in 'xx:xx:xx:xx:xx:xx' or 'xxxxxxxxxxxx' format" end end end if !machine.provider_config.volume_cache.nil? and machine.provider_config.volume_cache != UNSET_VALUE machine.ui.warn("Libvirt Provider: volume_cache is deprecated. Use disk_driver :cache => '#{machine.provider_config.volume_cache}' instead.") if !machine.provider_config.disk_driver_opts.empty? machine.ui.warn("Libvirt Provider: volume_cache has no effect when disk_driver is defined.") end end { 'Libvirt Provider' => errors } end def merge(other) super.tap do |result| c = disks.dup c += other.disks result.disks = c c = cdroms.dup c += other.cdroms result.cdroms = c result.disk_driver_opts = disk_driver_opts.merge(other.disk_driver_opts) c = clock_timers.dup c += other.clock_timers result.clock_timers = c c = qemu_env != UNSET_VALUE ? qemu_env.dup : {} c.merge!(other.qemu_env) if other.qemu_env != UNSET_VALUE result.qemu_env = c s = serials.dup s += other.serials result.serials = s end end private def finalize_from_uri # Parse uri to extract individual components uri = _parse_uri(@uri) system_uri = uri.dup system_uri.path = '/system' @system_uri = system_uri.to_s if @system_uri == UNSET_VALUE # only set @connect_via_ssh if not explicitly to avoid overriding # and allow an error to occur if the @uri and @connect_via_ssh disagree @connect_via_ssh = uri.scheme.include? 
"ssh" if @connect_via_ssh == UNSET_VALUE # Set qemu_use_session based on the URI if it wasn't set by the user if @qemu_use_session == UNSET_VALUE if (uri.scheme.start_with? "qemu") && (uri.path.include? "session") @qemu_use_session = true else @qemu_use_session = false end end # Extract host values from uri if provided, otherwise nil @host = uri.host @port = uri.port # only override username if there is a value provided @username = nil if @username == UNSET_VALUE @username = uri.user if uri.user if uri.query params = CGI.parse(uri.query) @id_ssh_key_file = params['keyfile'].first if params.has_key?('keyfile') end finalize_id_ssh_key_file end def resolve_ssh_key_file(key_file) # set ssh key for access to Libvirt host # if no slash, prepend $HOME/.ssh/ key_file = "#{ENV['HOME']}/.ssh/#{key_file}" if key_file && key_file !~ /\A\// key_file end def finalize_id_ssh_key_file # resolve based on the following roles # 1) if @connect_via_ssh is set to true, and id_ssh_key_file not current set, # set default if the file exists # 2) if supplied the key name, attempt to expand based on user home # 3) otherwise set to nil if @connect_via_ssh == true && @id_ssh_key_file == UNSET_VALUE # set default if using ssh while allowing a user using nil to disable this id_ssh_key_file = resolve_ssh_key_file('id_rsa') id_ssh_key_file = nil if !File.file?(id_ssh_key_file) elsif @id_ssh_key_file != UNSET_VALUE id_ssh_key_file = resolve_ssh_key_file(@id_ssh_key_file) else id_ssh_key_file = nil end @id_ssh_key_file = id_ssh_key_file end def finalize_proxy_command if @connect_via_ssh if @proxy_command == UNSET_VALUE proxy_command = "ssh '#{@host}' " proxy_command += "-p #{@port} " if @port proxy_command += "-l '#{@username}' " if @username proxy_command += "-i '#{@id_ssh_key_file}' " if @id_ssh_key_file proxy_command += '-W %h:%p' else inputs = { host: @host } inputs << { port: @port } if @port inputs[:username] = @username if @username inputs[:id_ssh_key_file] = @id_ssh_key_file if @id_ssh_key_file proxy_command = String.new(@proxy_command) # avoid needing to escape '%' symbols inputs.each do |key, value| proxy_command.gsub!("{#{key}}", value) end end @proxy_command = proxy_command else @proxy_command = nil end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/driver.rb000066400000000000000000000152241414232526500223420ustar00rootroot00000000000000# frozen_string_literal: true require 'fog/libvirt' require 'libvirt' require 'log4r' require 'json' module VagrantPlugins module ProviderLibvirt class Driver # store the connection at the instance level as this will be per # thread and allows for individual machines to use different # connection settings. # # possibly this should be a connection pool using the connection # settings as a key to allow identical connections to be reused # across machines. @connection = nil @system_connection = nil def initialize(machine) @logger = Log4r::Logger.new('vagrant_libvirt::driver') @machine = machine end def connection # If already connected to Libvirt, just use it and don't connect # again. return @connection if @connection # Get config options for Libvirt provider. config = @machine.provider_config uri = config.uri conn_attr = {} conn_attr[:provider] = 'libvirt' conn_attr[:libvirt_uri] = uri conn_attr[:libvirt_username] = config.username if config.username conn_attr[:libvirt_password] = config.password if config.password # Setup command for retrieving IP address for newly created machine # with some MAC address. 
Get it from the ARP table ip_command = %q( awk "/$mac/ {print \$1}" /proc/net/arp ) conn_attr[:libvirt_ip_command] = ip_command @logger.info("Connecting to Libvirt (#{uri}) ...") begin @connection = Fog::Compute.new(conn_attr) rescue Fog::Errors::Error => e raise Errors::FogLibvirtConnectionError, error_message: e.message end @connection end def system_connection # If already connected to Libvirt, just use it and don't connect # again. return @system_connection if @system_connection config = @machine.provider_config @system_connection = Libvirt::open_read_only(config.system_uri) @system_connection end def get_domain(machine) begin domain = connection.servers.get(machine.id) rescue Libvirt::RetrieveError => e if e.libvirt_code == ProviderLibvirt::Util::ErrorCodes::VIR_ERR_NO_DOMAIN @logger.debug("machine #{machine.name} domain not found #{e}.") return nil else raise e end end domain end def created?(machine) domain = get_domain(machine) !domain.nil? end def get_ipaddress(machine) # Find the machine domain = get_domain(machine) if domain.nil? # The machine can't be found return nil end get_domain_ipaddress(machine, domain) end def get_domain_ipaddress(machine, domain) # attempt to get ip address from qemu agent if @machine.provider_config.qemu_use_agent == true @logger.info('Get IP via qemu agent') return get_ipaddress_from_qemu_agent(domain, machine.id) end if @machine.provider_config.qemu_use_session return get_ipaddress_from_system domain.mac end # Get IP address from dhcp leases table begin ip_address = get_ipaddress_from_domain(domain) rescue Fog::Errors::TimeoutError @logger.info('Timeout at waiting for an ip address for machine %s' % machine.name) raise end unless ip_address @logger.info('No arp table entry found for machine %s' % machine.name) return nil end ip_address end def state(machine) # may be other error states with initial retrieval we can't handle begin domain = get_domain(machine) rescue Libvirt::RetrieveError => e @logger.debug("Machine #{machine.id} not found #{e}.") return :not_created end # TODO: terminated no longer appears to be a valid fog state, remove? return :not_created if domain.nil? || domain.state.to_sym == :terminated state = domain.state.tr('-', '_').to_sym if state == :running begin get_domain_ipaddress(machine, domain) rescue Fog::Errors::TimeoutError => e @logger.debug("Machine #{machine.id} running but no IP address available: #{e}.") return :inaccessible end end return state end private def get_ipaddress_from_system(mac) ip_address = nil system_connection.list_all_networks.each do |net| leases = net.dhcp_leases(mac, 0) # Assume the lease expiring last is the current IP address ip_address = leases.sort_by { |lse| lse["expirytime"] }.last["ipaddr"] if !leases.empty? break if ip_address end ip_address end def get_ipaddress_from_qemu_agent(domain, machine_id) ip_address = nil addresses = nil libvirt_domain = connection.client.lookup_domain_by_uuid(machine_id) begin response = libvirt_domain.qemu_agent_command('{"execute":"guest-network-get-interfaces"}', timeout=10) @logger.debug("Got Response from qemu agent") @logger.debug(response) addresses = JSON.parse(response) rescue => e @logger.debug("Unable to receive IP via qemu agent: [%s]" % e.message) end unless addresses.nil?
addresses["return"].each{ |interface| if domain.mac.downcase == interface["hardware-address"].downcase @logger.debug("Found matching interface: [%s]" % interface["name"]) if interface.has_key?("ip-addresses") interface["ip-addresses"].each{ |ip| # returning ipv6 addresses might break windows guests because # winrm can't handle the connection, winrm fails with "invalid uri" if ip["ip-address-type"] == "ipv4" ip_address = ip["ip-address"] @logger.debug("Return IP: [%s]" % ip_address) break end } end end } end ip_address end def get_ipaddress_from_domain(domain) ip_address = nil domain.wait_for(2) do addresses.each_pair do |type, ip| # Multiple leases are separated with a newline, return only # the most recent address ip_address = ip[0].split("\n").first if ip[0] != nil end ip_address != nil end ip_address end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/errors.rb000066400000000000000000000115431414232526500223630ustar00rootroot00000000000000# frozen_string_literal: true require 'vagrant' module VagrantPlugins module ProviderLibvirt module Errors class VagrantLibvirtError < Vagrant::Errors::VagrantError error_namespace('vagrant_libvirt.errors') end class CallChainError < VagrantLibvirtError error_key(:call_chain_error) end # package not supported class PackageNotSupported < VagrantLibvirtError error_key(:package_not_supported) end # Storage pools and volumes exceptions class NoStoragePool < VagrantLibvirtError error_key(:no_storage_pool) end class DomainVolumeExists < VagrantLibvirtError error_key(:domain_volume_exists) end class NoDomainVolume < VagrantLibvirtError error_key(:no_domain_volume) end class CreatingStoragePoolError < VagrantLibvirtError error_key(:creating_storage_pool_error) end class ImageUploadError < VagrantLibvirtError error_key(:image_upload_error) end class ImageDownloadError < VagrantLibvirtError error_key(:image_download_error) end # Box exceptions, capture all under one class BoxError < VagrantLibvirtError end class BoxFormatMissingAttribute < BoxError error_key(:box_format_missing_attribute) end class BoxFormatDuplicateVolume < BoxError error_key(:box_format_duplicate_volume) end class BadBoxImage < VagrantLibvirtError error_key(:bad_box_image) end class NoBoxVolume < VagrantLibvirtError error_key(:no_box_volume) end class NoBoxVirtualSizeSet < VagrantLibvirtError error_key(:no_box_virtual_size) end class NoDiskVirtualSizeSet < VagrantLibvirtError error_key(:no_disk_virtual_size) end class NoBoxFormatSet < VagrantLibvirtError error_key(:no_box_format) end class WrongBoxFormatSet < VagrantLibvirtError error_key(:wrong_box_format) end class WrongDiskFormatSet < VagrantLibvirtError error_key(:wrong_disk_format) end # Fog Libvirt exceptions class FogError < VagrantLibvirtError error_key(:fog_error) end class FogLibvirtConnectionError < VagrantLibvirtError error_key(:fog_libvirt_connection_error) end class FogCreateVolumeError < VagrantLibvirtError error_key(:fog_create_volume_error) end class FogCreateDomainVolumeError < VagrantLibvirtError error_key(:fog_create_domain_volume_error) end class FogCreateServerError < VagrantLibvirtError error_key(:fog_create_server_error) end # Network exceptions class ManagementNetworkError < VagrantLibvirtError error_key(:management_network_error) end class NetworkNameAndAddressMismatch < VagrantLibvirtError error_key(:network_name_and_address_mismatch) end class DHCPMismatch < VagrantLibvirtError error_key(:dhcp_mismatch) end class CreateNetworkError < VagrantLibvirtError error_key(:create_network_error) end class DestroyNetworkError
< VagrantLibvirtError error_key(:destroy_network_error) end class NetworkNotAvailableError < VagrantLibvirtError error_key(:network_not_available_error) end class AutostartNetworkError < VagrantLibvirtError error_key(:autostart_network_error) end class ActivateNetworkError < VagrantLibvirtError error_key(:activate_network_error) end class TunnelPortNotDefined < VagrantLibvirtError error_key(:tunnel_port_not_defined) end class ManagementNetworkRequired < VagrantLibvirtError error_key(:management_network_required) end # Other exceptions class InterfaceSlotNotAvailable < VagrantLibvirtError error_key(:interface_slot_not_available) end class InterfaceSlotExhausted < VagrantLibvirtError error_key(:interface_slot_exhausted) end class RsyncError < VagrantLibvirtError error_key(:rsync_error) end class DomainNameExists < VagrantLibvirtError error_key(:domain_name_exists) end class NoDomainError < VagrantLibvirtError error_key(:no_domain_error) end class AttachDeviceError < VagrantLibvirtError error_key(:attach_device_error) end class DetachDeviceError < VagrantLibvirtError error_key(:detach_device_error) end class NoIpAddressError < VagrantLibvirtError error_key(:no_ip_address_error) end class DeleteSnapshotError < VagrantLibvirtError error_key(:delete_snapshot_error) end class SerialCannotCreatePathError < VagrantLibvirtError error_key(:serial_cannot_create_path_error) end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/plugin.rb000066400000000000000000000064171414232526500223510ustar00rootroot00000000000000# frozen_string_literal: true begin require 'vagrant' rescue LoadError raise 'The Vagrant Libvirt plugin must be run within Vagrant.' end # compatibility fix to define constant not available Vagrant <1.6 ::Vagrant::MachineState::NOT_CREATED_ID ||= :not_created module VagrantPlugins module ProviderLibvirt class Plugin < Vagrant.plugin('2') name 'libvirt' description <<-DESC Vagrant plugin to manage VMs in Libvirt. DESC config('libvirt', :provider) do require_relative 'config' Config end provider('libvirt', parallel: true, box_optional: true) do require_relative 'provider' Provider end action_hook(:remove_libvirt_image) do |hook| require_relative 'action' hook.after Vagrant::Action::Builtin::BoxRemove, Action.remove_libvirt_image end guest_capability('linux', 'mount_9p_shared_folder') do require_relative 'cap/mount_9p' Cap::Mount9P end guest_capability('linux', 'mount_virtiofs_shared_folder') do require_relative 'cap/mount_virtiofs' Cap::MountVirtioFS end provider_capability(:libvirt, :nic_mac_addresses) do require_relative 'cap/nic_mac_addresses' Cap::NicMacAddresses end provider_capability(:libvirt, :public_address) do require_relative 'cap/public_address' Cap::PublicAddress end # lower priority than nfs or rsync # https://github.com/vagrant-libvirt/vagrant-libvirt/pull/170 synced_folder('9p', 4) do require_relative 'cap/synced_folder_9p' VagrantPlugins::SyncedFolder9P::SyncedFolder end synced_folder('virtiofs', 5) do require_relative 'cap/synced_folder_virtiofs' VagrantPlugins::SyncedFolderVirtioFS::SyncedFolder end # This initializes the internationalization strings. def self.setup_i18n I18n.load_path << File.expand_path('locales/en.yml', ProviderLibvirt.source_root) I18n.reload! end # This sets up our log level to be whatever VAGRANT_LOG is. def self.setup_logging require 'log4r' level = nil begin level = Log4r.const_get(ENV['VAGRANT_LOG'].upcase) rescue NameError # This means that the logging constant wasn't found, # which is fine. We just keep `level` as `nil`. 
But # we tell the user. level = nil end # Some constants, such as "true" resolve to booleans, so the # above error checking doesn't catch it. This will check to make # sure that the log level is an integer, as Log4r requires. level = nil unless level.is_a?(Integer) # Set the logging level on all "vagrant" namespaced # logs as long as we have a valid level. if level logger = Log4r::Logger.new('vagrant_libvirt') logger.outputters = Log4r::Outputter.stderr logger.level = level logger = nil end end # Setup logging and i18n before any autoloading loads other classes # with logging configured as this prevents inheritance of the log level # from the parent logger. setup_logging setup_i18n end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/provider.rb000066400000000000000000000103601414232526500226750ustar00rootroot00000000000000# frozen_string_literal: true require 'vagrant' module VagrantPlugins module ProviderLibvirt autoload :Driver, 'vagrant-libvirt/driver' # This is the base class for a provider for the V2 API. A provider # is responsible for creating compute resources to match the # needs of a Vagrant-configured system. class Provider < Vagrant.plugin('2', :provider) def initialize(machine) @machine = machine raise 'REQUIRE USE RUBY >= 1.9.3 VERSION' if RUBY_VERSION < '1.9.3' end # This should return an action callable for the given name. def action(name) # Attempt to get the action method from the Action class if it # exists, otherwise return nil to show that we don't support the # given action. action_method = "action_#{name}" return Action.send(action_method) if Action.respond_to?(action_method) nil end def driver return @driver if @driver @driver = Driver.new(@machine) end # This method is called if the underlying machine ID changes. Providers # can use this method to load in new data for the actual backing # machine or to realize that the machine is now gone (the ID can # become `nil`). def machine_id_changed; end # This should return a hash of information that explains how to # SSH into the machine. If the machine is not at a point where # SSH is even possible, then `nil` should be returned. def ssh_info # Return the ssh_info if already retrieved, otherwise call the driver # and save the result. # # SSH info has the following format: # # { # :host => "1.2.3.4", # :port => "22", # :username => "mitchellh", # :private_key_path => "/path/to/my/key" # } # note that modifying @machine.id or accessing @machine.state is not # thread safe, so be careful to avoid these here as this method may # be called from other threads of execution. return nil if state.id != :running ip = driver.get_ipaddress(@machine) # if can't determine the IP, just return nil and let the core # deal with it, similar to the docker provider return nil unless ip ssh_info = { host: ip, port: @machine.config.ssh.guest_port, forward_agent: @machine.config.ssh.forward_agent, forward_x11: @machine.config.ssh.forward_x11 } ssh_info[:proxy_command] = @machine.provider_config.proxy_command if @machine.provider_config.proxy_command ssh_info end def mac_addresses # Run a custom action called "read_mac_addresses" which will return # a list of mac addresses used by the machine. The returned data will # be in the following format: # # { # : # } env = @machine.action('read_mac_addresses') env[:machine_mac_addresses] end # This should return the state of the machine within this provider. # The state must be an instance of {MachineState}.
def state state_id = nil state_id = :not_created unless @machine.id state_id = :not_created if !state_id && (!@machine.id || !driver.created?(@machine)) # Query the driver for the current state of the machine state_id = driver.state(@machine) if @machine.id && !state_id state_id = :unknown unless state_id # This is a special pseudo-state so that we don't set the # NOT_CREATED_ID while we're setting up the machine. This avoids # clearing the data dir. state_id = :preparing if @machine.id == 'preparing' # Get the short and long description short = state_id.to_s.tr('_', ' ') long = I18n.t("vagrant_libvirt.states.#{state_id}") # If we're not created, then specify the special ID flag if state_id == :not_created state_id = Vagrant::MachineState::NOT_CREATED_ID end # Return the MachineState object Vagrant::MachineState.new(state_id, short, long) end def to_s id = @machine.id.nil? ? 'new' : @machine.id "Libvirt (#{id})" end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/templates/000077500000000000000000000000001414232526500225145ustar00rootroot00000000000000vagrant-libvirt-0.7.0/lib/vagrant-libvirt/templates/default_storage_pool.xml.erb000066400000000000000000000004431414232526500302070ustar00rootroot00000000000000 default <%= @storage_pool_path %> 0755 <%= @storage_pool_uid %> <%= @storage_pool_gid %> vagrant-libvirt-0.7.0/lib/vagrant-libvirt/templates/domain.xml.erb000066400000000000000000000255031414232526500252610ustar00rootroot00000000000000 <%= @name %> <%= @title %> <%= @description %> <%= @uuid %> <%= @memory_size %> cpuset='<%= @cpuset %>'<% end %>><%= @cpus %> <% if @cpu_mode != 'host-passthrough' %> <% if @cpu_mode == 'custom' %><%= @cpu_model %><% end %> <% if @nested %> <% if @cpu_features.select{|x| x[:name] == 'vmx'}.empty? %> <% end %> <% if @cpu_features.select{|x| x[:name] == 'svm'}.empty? %> <% end %> <% end %> <% @cpu_features.each do |cpu_feature| %> <% end %> <% unless @cpu_topology.empty? %> <%# CPU topology -%> <% end %> <% end %> <% if @numa_nodes %> <% @numa_nodes.each_with_index do |node, index| %> <% end %> <% end %> <%- if @nodeset -%> <%- end -%> <% unless @memory_backing.empty? %> <% @memory_backing.each do |backing| %> <<%= backing[:name] %> <%= backing[:config].map { |k,v| "#{k}='#{v}'"}.join(' ') %>/> <% end %> <% end%> <% if @shares %> <%= @shares %> <% end %> <% if @machine_type %> <% if @machine_arch %> hvm <% else %> hvm <% end %> <% else %> <% if @machine_arch %> hvm <% else %> hvm <% end %> <% end %> <% if @loader %> <% if @nvram %> <%= @loader %> <% else %> <%= @loader %> <% end %> <% end %> <% if @nvram %> <%= @nvram %> <% end %> <% if @boot_order.count >= 1 %> <% end %> <%= @kernel %> <%= @initrd %> <%= @cmd_line %> <% if @dtb %> <%= @dtb %> <% end %> <% @features.each do |feature| %> <<%= feature %>/> <% end %> <% if @kvm_hidden %> <% end %> <% if !@features_hyperv.empty? 
%> <% @features_hyperv.each do |feature| %> <<%= feature[:name] %> state='<%= feature[:state] %>'<% if feature[:name] == 'spinlocks' %> retries='<%= feature[:retries] %>'<% end %> /> <% end %> <% end %> <% @clock_timers.each do |clock_timer| %> <%= attr %>='<%= value %>'<% end %>/> <% end %> <% if @emulator_path %> <%= @emulator_path %> <% end %> <% @domain_volumes.each_with_index do |volume, index| -%> /> <%# we need to ensure a unique target dev -%> <% end -%> <%# additional disks -%> <% @disks.each_with_index do |d, index| -%> /> <% if d[:shareable] %> <% end %> <% if d[:serial] %> <%= d[:serial] %> <% end %> <% if d[:wwn] %> <%= d[:wwn] %> <% end %> <%# this will get auto generated by Libvirt
-%> <% end -%> <% @cdroms.each do |c| %> <% end %> <% @serials.each_with_index do |serial, port| %> <% unless serial[:source].nil? %> <% end %> <% end %> <% console_log = @serials.first %> <% unless console_log[:source].nil? %> <% end %> <% @channels.each do |channel| %> <%if channel[:source_mode] or channel[:source_path] %> mode='<%= channel[:source_mode] %>' <% end %> <% if channel[:source_path] %> path="<%= channel[:source_path] %>" <% end %> /> <% end %> name="<%= channel[:target_name] %>" <% end %> <% if channel[:target_address] %> address="<%= channel[:target_address] %>" <% end %> <% if channel[:target_port] %> port="<%= channel[:target_port] %>" <% end %> /> <% end %> <% @inputs.each do |input| %> <% end %> <% if !@sound_type.nil? %> <%# Sound device-%> <%# End Sound%> <% end %> <% if @graphics_type != 'none' %> <%# Video device -%> <% if not @graphics_gl %>/><% else %>> <% end %> <%#End Video -%> <% end %> <% if @rng[:model] == "random"%> /dev/random <% end %> <% @pcis.each do |pci| %>
<% end %> <% @usbs.each do |usb| %> <% if usb[:vendor] %> <% end %> <% if usb[:product] %> <% end %> <% if usb[:bus] && usb[:device] %>
<% end %> <% end %> <% unless @redirdevs.empty? %> <% @redirdevs.each do |redirdev| %> <% end %> <% unless @redirfilters.empty? %> <% @redirfilters.each do |usbdev| %> <% end %> <% end %> <% end %> <% unless @watchdog_dev.empty? %> <%# Watchdog Device -%> <% end %> <% unless @smartcard_dev.empty? -%> <% if @smartcard_dev[:mode] == 'passthrough' %> <% if @smartcard_dev[:type] == 'tcp' %> <% else %> <% end %> <% end %> <% end -%> <% if @tpm_path || @tpm_version -%> <%# TPM Device -%> version='<%= @tpm_version %>'<% end %>> <% if @tpm_path -%> <% end -%> <% end -%> <% if not @usbctl_dev.empty? %> <%# USB Controller -%> /> <% end %> <% unless @memballoon_enabled.nil? %> <% if @memballoon_enabled %>
<% else %> <% end %> <% end %> <% if not @qemu_args.empty? or not @qemu_env.empty? %> <% @qemu_args.each do |arg| %> <% end %> <% @qemu_env.each do |env_var, env_value| %> <% end %> <% end %> vagrant-libvirt-0.7.0/lib/vagrant-libvirt/templates/private_network.xml.erb000066400000000000000000000027521414232526500272360ustar00rootroot00000000000000 <%= @network_name %> <% if @network_domain_name %> <% end %> <% if @network_mtu %> <% end %> <% if (@network_forward_mode != 'none' && @network_forward_mode != 'veryisolated') %> <% if @network_forward_device %> <% else %> <% end %> <% end %> <% if @network_forward_mode != 'veryisolated' %> <% if @network_dhcp_enabled %> <% if @network_dhcp_bootp_file %> <% if @network_dhcp_bootp_server %> <% else %> <% end %> <% end %> <% end %> <% end %> <% if !@network_ipv6_address.nil? && !@network_ipv6_prefix.nil? %> <% end %> vagrant-libvirt-0.7.0/lib/vagrant-libvirt/templates/public_interface.xml.erb000066400000000000000000000020251414232526500273020ustar00rootroot00000000000000 trustGuestRxFilters='yes'<% end %>> <% if @mac %> <% end %> <%if @type == 'direct'%> <% elsif !@portgroup.nil? %> <% else %> <% end %> <% if @driver_name and @driver_queues %> <% elsif @driver_queues %> <% elsif @driver_name %> <% end %> <% if @ovs %> <% if @ovs_interfaceid %> <% end %> <% end %> <% if @pci_bus and @pci_slot %>
<% end %> vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util.rb000066400000000000000000000010231414232526500220140ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Util autoload :ErbTemplate, 'vagrant-libvirt/util/erb_template' autoload :Collection, 'vagrant-libvirt/util/collection' autoload :Timer, 'vagrant-libvirt/util/timer' autoload :NetworkUtil, 'vagrant-libvirt/util/network_util' autoload :StorageUtil, 'vagrant-libvirt/util/storage_util' autoload :ErrorCodes, 'vagrant-libvirt/util/error_codes' autoload :Ui, 'vagrant-libvirt/util/ui' end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/000077500000000000000000000000001414232526500214735ustar00rootroot00000000000000vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/byte_number.rb000066400000000000000000000016341414232526500243370ustar00rootroot00000000000000class ByteNumber < Numeric def initialize(int) @int = int end def to_s @int.to_s end def to_i @int end def to_f @int.to_f end def to_B to_i end def to_KB _compute_unit_to_n_kilo(1) end def to_MB _compute_unit_to_n_kilo(2) end def to_GB _compute_unit_to_n_kilo(3) end def coerce(other) to_i.coerce(other) end def <=>(other) to_i <=> other end def +(other) to_i + other end def -(other) to_i - other end def *(other) to_i * other end def /(other) to_i / other end def pow(n) self.class.new(to_i ** n) end def self.from_GB(value) self.new(value*(1024**3)) end private def _compute_unit_to_n_kilo(n=0) (to_f/(1024 ** n)).ceil end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/collection.rb000066400000000000000000000010431414232526500241510ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Util module Collection # This method finds a matching _thing_ in a collection of # _things_. This works matching if the ID or NAME equals to # `name`. Or, if `name` is a regexp, a partial match is chosen # as well. def self.find_matching(collection, name) collection.each do |single| return single if single.name == name end nil end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/erb_template.rb000066400000000000000000000016101414232526500244610ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Util module ErbTemplate # TODO: remove and use nokogiri builder def to_xml(template_name = nil, data = binding) erb = template_name || self.class.to_s.split('::').last.downcase path = File.join(File.dirname(__FILE__), '..', 'templates') template = "#{erb}.xml" # TODO: according to erubis documentation, we should rather use evaluate and forget about # binding since the template may then change variables values Vagrant::Util::TemplateRenderer.render_with(:render, template, template_root: path) do |renderer| iv = data.eval ("instance_variables.collect {|i| [i, instance_variable_get(i.to_sym)]}") iv.each {|k, v| renderer.instance_variable_set(k, v)} end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/error_codes.rb000066400000000000000000000142071414232526500243320ustar00rootroot00000000000000# frozen_string_literal: true # Ripped from http://libvirt.org/html/libvirt-virterror.html#virErrorNumber. 
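# These constants mirror libvirt's virErrorNumber values so callers can compare
# the `libvirt_code` of a rescued ::Libvirt::Error against a named code rather
# than a bare integer. Illustrative sketch only (not part of this file's API),
# assuming the fog connection and machine id used elsewhere in this plugin:
#
#   begin
#     connection.client.lookup_domain_by_uuid(machine.id)
#   rescue ::Libvirt::RetrieveError => e
#     raise unless e.libvirt_code == ErrorCodes::VIR_ERR_NO_DOMAIN
#   end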
module VagrantPlugins module ProviderLibvirt module Util module ErrorCodes VIR_ERR_OK = 0 VIR_ERR_INTERNAL_ERROR = 1 # internal error VIR_ERR_NO_MEMORY = 2 # memory allocation failure VIR_ERR_NO_SUPPORT = 3 # no support for this function VIR_ERR_UNKNOWN_HOST = 4 # could not resolve hostname VIR_ERR_NO_CONNECT = 5 # can't connect to hypervisor VIR_ERR_INVALID_CONN = 6 # invalid connection object VIR_ERR_INVALID_DOMAIN = 7 # invalid domain object VIR_ERR_INVALID_ARG = 8 # invalid function argument VIR_ERR_OPERATION_FAILED = 9 # a command to hypervisor failed VIR_ERR_GET_FAILED = 10 # a HTTP GET command to failed VIR_ERR_POST_FAILED = 11 # a HTTP POST command to failed VIR_ERR_HTTP_ERROR = 12 # unexpected HTTP error code VIR_ERR_SEXPR_SERIAL = 13 # failure to serialize an S-Expr VIR_ERR_NO_XEN = 14 # could not open Xen hypervisor control VIR_ERR_XEN_CALL = 15 # failure doing an hypervisor call VIR_ERR_OS_TYPE = 16 # unknown OS type VIR_ERR_NO_KERNEL = 17 # missing kernel information VIR_ERR_NO_ROOT = 18 # missing root device information VIR_ERR_NO_SOURCE = 19 # missing source device information VIR_ERR_NO_TARGET = 20 # missing target device information VIR_ERR_NO_NAME = 21 # missing domain name information VIR_ERR_NO_OS = 22 # missing domain OS information VIR_ERR_NO_DEVICE = 23 # missing domain devices information VIR_ERR_NO_XENSTORE = 24 # could not open Xen Store control VIR_ERR_DRIVER_FULL = 25 # too many drivers registered VIR_ERR_CALL_FAILED = 26 # not supported by the drivers (DEPRECATED) VIR_ERR_XML_ERROR = 27 # an XML description is not well formed or broken VIR_ERR_DOM_EXIST = 28 # the domain already exist VIR_ERR_OPERATION_DENIED = 29 # operation forbidden on read-only connections VIR_ERR_OPEN_FAILED = 30 # failed to open a conf file VIR_ERR_READ_FAILED = 31 # failed to read a conf file VIR_ERR_PARSE_FAILED = 32 # failed to parse a conf file VIR_ERR_CONF_SYNTAX = 33 # failed to parse the syntax of a conf file VIR_ERR_WRITE_FAILED = 34 # failed to write a conf file VIR_ERR_XML_DETAIL = 35 # detail of an XML error VIR_ERR_INVALID_NETWORK = 36 # invalid network object VIR_ERR_NETWORK_EXIST = 37 # the network already exist VIR_ERR_SYSTEM_ERROR = 38 # general system call failure VIR_ERR_RPC = 39 # some sort of RPC error VIR_ERR_GNUTLS_ERROR = 40 # error from a GNUTLS call VIR_WAR_NO_NETWORK = 41 # failed to start network VIR_ERR_NO_DOMAIN = 42 # domain not found or unexpectedly disappeared VIR_ERR_NO_NETWORK = 43 # network not found VIR_ERR_INVALID_MAC = 44 # invalid MAC address VIR_ERR_AUTH_FAILED = 45 # authentication failed VIR_ERR_INVALID_STORAGE_POOL = 46 # invalid storage pool object VIR_ERR_INVALID_STORAGE_VOL = 47 # invalid storage vol object VIR_WAR_NO_STORAGE = 48 # failed to start storage VIR_ERR_NO_STORAGE_POOL = 49 # storage pool not found VIR_ERR_NO_STORAGE_VOL = 50 # storage volume not found VIR_WAR_NO_NODE = 51 # failed to start node driver VIR_ERR_INVALID_NODE_DEVICE = 52 # invalid node device object VIR_ERR_NO_NODE_DEVICE = 53 # node device not found VIR_ERR_NO_SECURITY_MODEL = 54 # security model not found VIR_ERR_OPERATION_INVALID = 55 # operation is not applicable at this time VIR_WAR_NO_INTERFACE = 56 # failed to start interface driver VIR_ERR_NO_INTERFACE = 57 # interface driver not running VIR_ERR_INVALID_INTERFACE = 58 # invalid interface object VIR_ERR_MULTIPLE_INTERFACES = 59 # more than one matching interface found VIR_WAR_NO_NWFILTER = 60 # failed to start nwfilter driver VIR_ERR_INVALID_NWFILTER = 61 # invalid nwfilter object VIR_ERR_NO_NWFILTER = 62 # nw 
filter pool not found VIR_ERR_BUILD_FIREWALL = 63 # nw filter pool not found VIR_WAR_NO_SECRET = 64 # failed to start secret storage VIR_ERR_INVALID_SECRET = 65 # invalid secret VIR_ERR_NO_SECRET = 66 # secret not found VIR_ERR_CONFIG_UNSUPPORTED = 67 # unsupported configuration construct VIR_ERR_OPERATION_TIMEOUT = 68 # timeout occurred during operation VIR_ERR_MIGRATE_PERSIST_FAILED = 69 # a migration worked, but making the VM persist on the dest host failed VIR_ERR_HOOK_SCRIPT_FAILED = 70 # a synchronous hook script failed VIR_ERR_INVALID_DOMAIN_SNAPSHOT = 71 # invalid domain snapshot VIR_ERR_NO_DOMAIN_SNAPSHOT = 72 # domain snapshot not found VIR_ERR_INVALID_STREAM = 73 # stream pointer not valid VIR_ERR_ARGUMENT_UNSUPPORTED = 74 # valid API use but unsupported by the given driver VIR_ERR_STORAGE_PROBE_FAILED = 75 # storage pool probe failed VIR_ERR_STORAGE_POOL_BUILT = 76 # storage pool already built VIR_ERR_SNAPSHOT_REVERT_RISKY = 77 # force was not requested for a risky domain snapshot revert VIR_ERR_OPERATION_ABORTED = 78 # operation on a domain was canceled/aborted by user VIR_ERR_AUTH_CANCELLED = 79 # authentication cancelled VIR_ERR_NO_DOMAIN_METADATA = 80 # The metadata is not present VIR_ERR_MIGRATE_UNSAFE = 81 # Migration is not safe VIR_ERR_OVERFLOW = 82 # integer overflow VIR_ERR_BLOCK_COPY_ACTIVE = 83 # action prevented by block copy job VIR_ERR_OPERATION_UNSUPPORTED = 84 # The requested operation is not supported VIR_ERR_SSH = 85 # error in ssh transport driver VIR_ERR_AGENT_UNRESPONSIVE = 86 # guest agent is unresponsive, not running or not usable VIR_ERR_RESOURCE_BUSY = 87 # resource is already in use VIR_ERR_ACCESS_DENIED = 88 # operation on the object/resource was denied VIR_ERR_DBUS_SERVICE = 89 # error from a dbus service VIR_ERR_STORAGE_VOL_EXIST = 90 # the storage vol already exists end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/network_util.rb000066400000000000000000000171351414232526500245550ustar00rootroot00000000000000# frozen_string_literal: true require 'ipaddr' require 'nokogiri' require 'vagrant/util/network_ip' class IPAddr def get_mask if @addr _to_string(@mask_addr) end end end module VagrantPlugins module ProviderLibvirt module Util module NetworkUtil include Vagrant::Util::NetworkIP def configured_networks(env, logger) qemu_use_session = env[:machine].provider_config.qemu_use_session qemu_use_agent = env[:machine].provider_config.qemu_use_agent management_network_device = env[:machine].provider_config.management_network_device management_network_name = env[:machine].provider_config.management_network_name management_network_address = env[:machine].provider_config.management_network_address management_network_mode = env[:machine].provider_config.management_network_mode management_network_mac = env[:machine].provider_config.management_network_mac management_network_guest_ipv6 = env[:machine].provider_config.management_network_guest_ipv6 management_network_autostart = env[:machine].provider_config.management_network_autostart management_network_pci_bus = env[:machine].provider_config.management_network_pci_bus management_network_pci_slot = env[:machine].provider_config.management_network_pci_slot management_network_domain = env[:machine].provider_config.management_network_domain management_network_mtu = env[:machine].provider_config.management_network_mtu logger.info "Using #{management_network_name} at #{management_network_address} as the management network #{management_network_mode} is the mode" begin management_network_ip = 
IPAddr.new(management_network_address) rescue ArgumentError raise Errors::ManagementNetworkError, error_message: "#{management_network_address} is not a valid IP address" end # capture address into $1 and mask into $2 management_network_ip.inspect =~ /IPv4:(.*)\/(.*)>/ if Regexp.last_match(2) == '255.255.255.255' raise Errors::ManagementNetworkError, error_message: "#{management_network_address} does not include both an address and subnet mask" end if qemu_use_session management_network_options = { iface_type: :public_network, dev: management_network_device, mode: 'bridge', type: 'bridge', bus: management_network_pci_bus, slot: management_network_pci_slot } else management_network_options = { iface_type: :private_network, network_name: management_network_name, ip: Regexp.last_match(1), netmask: Regexp.last_match(2), dhcp_enabled: true, forward_mode: management_network_mode, guest_ipv6: management_network_guest_ipv6, autostart: management_network_autostart, bus: management_network_pci_bus, slot: management_network_pci_slot } end unless management_network_mac.nil? management_network_options[:mac] = management_network_mac end unless management_network_domain.nil? management_network_options[:domain_name] = management_network_domain end unless management_network_mtu.nil? management_network_options[:mtu] = management_network_mtu end unless management_network_pci_bus.nil? and management_network_pci_slot.nil? management_network_options[:bus] = management_network_pci_bus management_network_options[:slot] = management_network_pci_slot end if (env[:machine].config.vm.box && !env[:machine].provider_config.mgmt_attach) raise Errors::ManagementNetworkRequired end # add management network to list of networks to check # unless mgmt_attach set to false networks = if env[:machine].provider_config.mgmt_attach [management_network_options] else [] end env[:machine].config.vm.networks.each do |type, original_options| logger.debug "In config found network type #{type} options #{original_options}" # Options can be specified in Vagrantfile in short format (:ip => ...), # or provider format # (:libvirt__network_name => ...). # https://github.com/mitchellh/vagrant/blob/master/lib/vagrant/util/scoped_hash_override.rb options = scoped_hash_override(original_options, :libvirt) # store type in options # use default values if not already set options = { iface_type: type, netmask: options[:network_address] ? IPAddr.new(options[:network_address]).get_mask : '255.255.255.0', dhcp_enabled: true, forward_mode: 'nat', always_destroy: true }.merge(options) if options[:type].to_s == 'dhcp' && options[:ip].nil? options[:network_name] = options[:network_name] ? options[:network_name] : 'vagrant-private-dhcp' end # add to list of networks to check networks.push(options) end networks end # Return a list of all (active and inactive) Libvirt networks as a list # of hashes with their name, network address and status (active or not) def libvirt_networks(libvirt_client) libvirt_networks = [] active = libvirt_client.list_networks inactive = libvirt_client.list_defined_networks # Iterate over all (active and inactive) networks. active.concat(inactive).each do |network_name| libvirt_network = libvirt_client.lookup_network_by_name( network_name ) # Parse ip address and netmask from the network xml description. 
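# The DHCP flag and domain name are pulled from the same document below so
# that the hash built for each network also carries dhcp_enabled and
# domain_name.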
xml = Nokogiri::XML(libvirt_network.xml_desc) ip = xml.xpath('/network/ip/@address').first ip = ip.value if ip netmask = xml.xpath('/network/ip/@netmask').first netmask = netmask.value if netmask dhcp_enabled = if xml.at_xpath('//network/ip/dhcp') true else false end domain_name = xml.at_xpath('/network/domain/@name') domain_name = domain_name.value if domain_name # Calculate network address of network from ip address and # netmask. network_address = (network_address(ip, netmask) if ip && netmask) libvirt_networks << { name: network_name, ip_address: ip, netmask: netmask, network_address: network_address, dhcp_enabled: dhcp_enabled, bridge_name: libvirt_network.bridge_name, domain_name: domain_name, created: true, active: libvirt_network.active?, autostart: libvirt_network.autostart?, libvirt_network: libvirt_network } end libvirt_networks end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/nfs.rb000066400000000000000000000010271414232526500226060ustar00rootroot00000000000000# frozen_string_literal: true require 'vagrant/action/builtin/mixin_synced_folders' module VagrantPlugins module ProviderLibvirt module Util module Nfs include Vagrant::Action::Builtin::MixinSyncedFolders # We're using NFS if we have any synced folder with NFS configured. If # we are not using NFS we don't need to do the extra work to # populate these fields in the environment. def using_nfs? !!synced_folders(@machine)[:nfs] end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/storage_util.rb000066400000000000000000000013461414232526500245250ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Util module StorageUtil def storage_uid(env) env[:machine].provider_config.qemu_use_session ? Process.uid : 0 end def storage_gid(env) env[:machine].provider_config.qemu_use_session ? Process.gid : 0 end def storage_pool_path(env) if env[:machine].provider_config.storage_pool_path env[:machine].provider_config.storage_pool_path elsif env[:machine].provider_config.qemu_use_session File.expand_path('~/.local/share/libvirt/images') else '/var/lib/libvirt/images' end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/timer.rb000066400000000000000000000006161414232526500231430ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Util class Timer # A basic utility method that times the execution of the given # block and returns it. def self.time start_time = Time.now.to_f yield end_time = Time.now.to_f end_time - start_time end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/util/ui.rb000066400000000000000000000013031414232526500224320ustar00rootroot00000000000000# frozen_string_literal: true module VagrantPlugins module ProviderLibvirt module Util module Ui # Since v2.2.8 Vagrant support --no-tty option, which silences # progress bars and other interactive elements for cleaner logs # in scripts, but requires a slight change in UI object handling. # This helper allows the vagrant-libvirt plugin to stay compatible # with the older Vagrant versions. 
# See: https://github.com/hashicorp/vagrant/pull/11465/ def rewriting(ui) if ui.respond_to?(:rewriting) ui.rewriting {|rw| yield rw} else yield ui end end end end end end vagrant-libvirt-0.7.0/lib/vagrant-libvirt/version.rb000066400000000000000000000050371414232526500225350ustar00rootroot00000000000000# frozen_string_literal: true require 'open3' require 'tmpdir' module VagrantPlugins module ProviderLibvirt VERSION_FILE = File.dirname(__FILE__) + "/version" GIT_ARCHIVE_VERSION = "bb267e391049e22d872e5f700b5cd0aa2a4cdac7 HEAD -> master, tag: 0.7.0" HOMEPAGE = 'https://github.com/vagrant-libvirt/vagrant-libvirt' def self.get_version if File.exist?(VERSION_FILE) # built gem version = File.read(VERSION_FILE) elsif self.inside_git_repository # local repo git_version = `git describe --tags` version = self.version_from_describe(git_version) elsif !GIT_ARCHIVE_VERSION.start_with?('$Format') # archive - format string replaced during export hash, refs = GIT_ARCHIVE_VERSION.split(' ', 2) tag = refs.split(',').select { |ref| ref.strip.start_with?("tag:") }.first if tag != nil # tagged version = tag.strip.split(' ').last else version = "" # arbitrary branch/commit Dir.mktmpdir do |dir| stdout_and_stderr, status = Open3.capture2e("git -C #{dir} clone --bare #{HOMEPAGE}") raise "failed to clone original to resolve version: #{stdout_and_stderr}" unless status.success? stdout_and_stderr, status = Open3.capture2e("git --git-dir=#{dir}/vagrant-libvirt.git describe --tags #{hash}") raise "failed to determine version for #{hash}: #{stdout_and_stderr}" unless status.success? version = version_from_describe(stdout_and_stderr) end # in this case write the version file to avoid cloning a second time File.write(VERSION_FILE, version) end else # no idea version = "9999" end return version.freeze end def self.write_version File.write(VERSION_FILE, self.get_version) end private def self.inside_git_repository _, status = Open3.capture2e("git rev-parse --git-dir") status.success? end def self.version_from_describe(describe) version_parts = describe.split('-').first(2) # drop the git sha if it exists if version_parts.length > 1 # increment the patch number so that this is marked as a pre-release of the # next possible release main_version_parts = Gem::Version.new(version_parts[0]).segments main_version_parts[-1] = main_version_parts.last + 1 version_parts = main_version_parts + ["pre", version_parts[1]] end version = version_parts.join(".") end end end vagrant-libvirt-0.7.0/locales/000077500000000000000000000000001414232526500162575ustar00rootroot00000000000000vagrant-libvirt-0.7.0/locales/en.yml000066400000000000000000000201511414232526500174030ustar00rootroot00000000000000en: vagrant_libvirt: already_created: |- The domain is already created. not_created: |- Domain is not created. Please run `vagrant up` first. not_running: |- Domain is not running. Please run `vagrant up` or `vagrant resume` first. not_suspended: |- Domain is not suspended. finding_volume: |- Checking if volume is available. creating_domain: |- Creating domain with the following settings... manual_resize_required: |- Created volume larger than box defaults, will require manual resizing of filesystems to utilize. box_version_missing: |- No verison detected for %{name}, using timestamp to watch for modifications. Consider generating a local metadata for the box with a version to allow better handling. See https://www.vagrantup.com/docs/boxes/format#box-metadata for further details. 
uploading_volume: |- Uploading base box image as volume into Libvirt storage... creating_domain_volume: |- Creating image (snapshot of base box volume). removing_domain_volume: |- Removing image (snapshot of base box volume). starting_domain: |- Starting domain. terminating: |- Removing domain... poweroff_domain: |- Poweroff domain. destroy_domain: |- Removing domain... shutdown_domain: |- Attempting direct shutdown of domain... halt_domain: |- Halting domain... resuming_domain: |- Resuming domain... suspending_domain: |- Suspending domain... package_domain: |- Packaging domain... waiting_for_ready: |- Waiting for domain to become "ready"... waiting_for_ip: |- Waiting for domain to get an IP address... waiting_for_ssh: |- Waiting for SSH to become available... booted: |- Machine is booted. rsync_folder: |- Rsyncing folder: %{hostpath} => %{guestpath} ready: |- Machine is booted and ready for use! remove_stale_volume: |- Remove stale volume... warnings: ignoring_virtual_size_too_small: |- Ignoring requested virtual disk size of '%{requested}' as it is below the minimum box image size of '%{minimum}'. forwarding_udp: |- Forwarding UDP ports is not supported. Ignoring. creating_domain_console_access_disabled: |- Serial console is being redirected, access via virsh will be disabled. errors: call_chain_error: Invalid action chain, must ensure that '%{require_action}' is called prior to calling '%{current_action}' package_not_supported: No support for package with Libvirt. Create box manually. fog_error: |- There was an error talking to Libvirt. The error message is shown below: %{message} no_matching_volume: |- No matching volume was found! Please check your volume setting to make sure you have a valid volume chosen. no_storage_pool: |- No usable storage pool found! Please check if storage pool is created and available. box_format_duplicate_volume: |- Encountered a duplicate volume name '%{volume}' generated for disk '%{new_disk}', due to already allocated for disk '%{orig_disk}'. box_format_missing_attribute: |- Invalid box metadata, missing expected attribute: '%{attribute}' bad_box_image: |- Received error when query the box image details from '%{image}'. Stdout: %{out} Stderr: %{err} no_box_volume: |- Volume for box image is missing in storage pools. Try to run vagrant again, or check if storage volume is accessible. domain_volume_exists: |- Volume for domain is already created. Please run 'vagrant destroy' first. no_domain_volume: |- Volume for domain is missing. Try to run 'vagrant up' again. interface_slot_not_available: |- Interface adapter number is already in use. Please specify other adapter number. interface_slot_exhausted: |- Available interface adapters have been exhausted. Please increase the nic_adapter_count. rsync_error: |- There was an error when attempting to rsync a share folder. Please inspect the error message below for more info. Host path: %{hostpath} Guest path: %{guestpath} Error: %{stderr} no_box_virtual_size: |- No image virtual size specified for box. no_disk_virtual_size: |- No image virtual size specified for disk with index %{disk_index}. no_box_format: |- No image format specified for box. wrong_box_format: |- Wrong image format specified for box. wrong_disk_format: |- Wrong image format specified for disk with index %{disk_index}. 
fog_libvirt_connection_error: |- Error while connecting to Libvirt: %{error_message} fog_create_volume_error: |- Error while creating a storage pool volume: %{error_message} fog_create_domain_volume_error: |- Error while creating volume for domain: %{error_message} fog_create_server_error: |- Error while creating domain: %{error_message} domain_name_exists: |- Name `%{domain_name}` of domain about to create is already taken. Please try to run `vagrant up` command again. creating_storage_pool_error: |- There was error while creating Libvirt storage pool: %{error_message} image_upload_error: |- Error while uploading image to storage pool: %{error_message} image_download_error: |- Error while downloading volume '%{volume_name}' from storage pool '%{pool_name}': %{error_message} no_domain_error: |- No domain found. %{error_message} attach_device_error: |- Error while attaching new device to domain. %{error_message} detach_device_error: |- Error while detaching device from domain. %{error_message} no_ip_address_error: |- No IP address found. management_network_error: |- Error in specification of management network: %{error_message}. network_name_and_address_mismatch: |- Address %{ip_address} does not match with network name %{network_name}. Please fix your configuration and run vagrant again. dhcp_mismatch: |- Network %{network_name} exists but does not have dhcp %{requested}. Please fix your configuration and run vagrant again. create_network_error: |- Error occurred while creating new network: %{error_message}. network_not_available_error: |- Network %{network_name} is not available. Specify available network name, or an ip address if you want to create a new network. activate_network_error: |- Error while activating network: %{error_message}. autostart_network_error: |- Error while setting up autostart on network: %{error_message}. destroy_network_error: |- Error while removing network %{network_name}. %{error_message}. delete_snapshot_error: |- Error while deleting snapshot: %{error_message}. tunnel_port_not_defined: |- tunnel UDP or TCP port not defined. management_network_required: |- Management network can't be disabled when VM use box. Please fix your configuration and run vagrant again. serial_cannot_create_path_error: |- Error creating path for serial port output log: %{path} states: paused: |- The Libvirt domain is suspended. Run `vagrant resume` to resume it. shutting_down: |- The Libvirt domain is shutting down. Wait for it to complete and then run `vagrant up` to start it or `vagrant destroy` to remove. shutoff: |- The Libvirt domain is not running. Run `vagrant up` to start it. not_created: |- The Libvirt domain is not created. Run `vagrant up` to create it. running: |- The Libvirt domain is running. To stop this machine, you can run `vagrant halt`. To destroy the machine, you can run `vagrant destroy`. preparing: |- The vagrant machine is being prepared for creation, please wait for it to reach a steady state before issuing commands on it. vagrant-libvirt-0.7.0/spec/000077500000000000000000000000001414232526500155675ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/spec_helper.rb000066400000000000000000000027241414232526500204120ustar00rootroot00000000000000# frozen_string_literal: true require 'simplecov' require 'simplecov-lcov' # patch simplecov configuration if ! SimpleCov::Configuration.method_defined? :branch_coverage? module SimpleCov module Configuration def branch_coverage? 
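# this SimpleCov release has no branch-coverage support, so report line coverage only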
return false end end end end SimpleCov::Formatter::LcovFormatter.config do |config| config.report_with_single_file = true config.single_report_path = 'coverage/lcov.info' end SimpleCov.formatters = SimpleCov::Formatter::MultiFormatter.new( [ SimpleCov::Formatter::HTMLFormatter, SimpleCov::Formatter::LcovFormatter, ] ) SimpleCov.start do add_filter 'spec/' end require 'vagrant-libvirt' require 'support/environment_helper' require 'vagrant-spec/unit' Dir[File.dirname(__FILE__) + '/support/**/*.rb'].each { |f| require f } RSpec.configure do |config| # ensure that setting of LIBVIRT_DEFAULT_URI in the environment is not picked # up directly by tests, instead they must set as needed. Some build envs will # may have it set to 'qemu:///session'. config.before(:suite) do ENV.delete('LIBVIRT_DEFAULT_URI') end config.mock_with :rspec do |mocks| # This option should be set when all dependencies are being loaded # before a spec run, as is the case in a typical spec helper. It will # cause any verifying double instantiation for a class that does not # exist to raise, protecting against incorrectly spelt names. mocks.verify_doubled_constant_names = true end end vagrant-libvirt-0.7.0/spec/support/000077500000000000000000000000001414232526500173035ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/support/binding_proc.rb000066400000000000000000000011111414232526500222570ustar00rootroot00000000000000# frozen_string_literal: true ## # A simple extension of the Proc class that supports setting a custom binding # and evaluates everything in the Proc using the new binding. class ProcWithBinding < Proc ## # Set the binding for this instance def apply_binding(bind, *args) @binding = bind instance_exec(*args, &self) end def method_missing(method, *args) begin method_from_binding = eval("method(#{method.inspect})", @binding) return method_from_binding.call(*args) rescue NameError # fall through on purpose end super end end vagrant-libvirt-0.7.0/spec/support/environment_helper.rb000066400000000000000000000015141414232526500235340ustar00rootroot00000000000000# frozen_string_literal: true require 'ostruct' require 'pathname' class EnvironmentHelper attr_writer :domain_name attr_accessor :random_hostname, :name, :default_prefix def [](value) send(value.to_sym) end def cpus 4 end def memory 1024 end %w(cpus cpu_mode loader nvram boot_order machine_type disk_bus disk_device nested volume_cache kernel cmd_line initrd graphics_type graphics_autoport graphics_port graphics_ip graphics_passwd video_type video_vram keymap storage_pool_name disks cdroms driver).each do |name| define_method(name.to_sym) do nil end end def machine self end def provider_config self end def root_path Pathname.new('./spec/support/foo') end def domain_name # noop end def libvirt_compute OpenStruct.new(servers: []) end end vagrant-libvirt-0.7.0/spec/support/libvirt_context.rb000066400000000000000000000023511414232526500230500ustar00rootroot00000000000000# frozen_string_literal: true require 'fog/libvirt' require 'fog/libvirt/models/compute/server' require 'libvirt' shared_context 'libvirt' do include_context 'unit' let(:libvirt_context) { true } let(:id) { 'dummy-vagrant_dummy' } let(:connection) { double('connection') } let(:domain) { instance_double('::Fog::Libvirt::Compute::Server') } let(:libvirt_client) { instance_double('::Libvirt::Connect') } let(:libvirt_domain) { instance_double('::Libvirt::Domain') } let(:logger) { double('logger') } def connection_result(options = {}) result = options.fetch(:result, nil) double('connection_result' => 
result) end before (:each) do # we don't want unit tests to ever run commands on the system; so we wire # in a double to ensure any unexpected messages raise exceptions stub_const('::Fog::Compute', connection) # drivers also call vm_exists? during init; allow(connection).to receive(:servers) .and_return(connection_result(result: nil)) allow(connection).to receive(:client).and_return(libvirt_client) allow(machine).to receive(:id).and_return(id) allow(Log4r::Logger).to receive(:new).and_return(logger) end end vagrant-libvirt-0.7.0/spec/support/matchers/000077500000000000000000000000001414232526500211115ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/support/matchers/have_file_content.rb000066400000000000000000000035121414232526500251130ustar00rootroot00000000000000# frozen_string_literal: true require "rspec/expectations/version" # # Taken from https://github.com/cucumber/aruba/blob/main/lib/aruba/matchers/file/have_file_content.rb # with minor modifications # # @!method have_file_content(content) # This matchers checks if has content. `content` can be a string, # regexp or an RSpec matcher. # # @param [String, Regexp, Matcher] content # Specifies the content of the file # # @return [Boolean] The result # # false: # * if file does not exist # * if file content is not equal string # * if file content does not include regexp # * if file content does not match the content specification # # true: # * if file content includes regexp # * if file content is equal string # * if file content matches the content specification # # @example Use matcher with string # # RSpec.describe do # it { expect(file1).to have_file_content('a') } # end # # @example Use matcher with regexp # # RSpec.describe do # it { expect(file1).to have_file_content(/a/) } # end # # @example Use matcher with an RSpec matcher # # RSpec.describe do # it { expect(file1).to have_file_content(a_string_starting_with 'a') } # it { expect(files1).to include a_file_having_content(a_string_starting_with 'a') } # end RSpec::Matchers.define :have_file_content do |expected| match do |actual| next false unless File.exists?(actual) @actual = File.read(actual).chomp @expected = if expected.is_a? String expected.chomp else expected end values_match?(@expected, @actual) end diffable if expected.is_a? String description { "have file content: #{description_of expected}" } end RSpec::Matchers.alias_matcher :a_file_having_content, :have_file_content vagrant-libvirt-0.7.0/spec/support/sharedcontext.rb000066400000000000000000000024731414232526500225110ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' shared_context 'unit' do include_context 'vagrant-unit' let(:vagrantfile_providerconfig) { '' } let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.box = "vagrant-libvirt/test" config.vm.define :test config.vm.provider :libvirt do |libvirt| #{vagrantfile_providerconfig} end end EOF end let(:test_env) do test_env = isolated_environment test_env.vagrantfile vagrantfile test_env end let(:env) { { env: iso_env, machine: machine, ui: ui, root_path: '/rootpath' } } let(:conf) { Vagrant::Config::V2::DummyConfig.new } let(:ui) { Vagrant::UI::Silent.new } let(:iso_env) { test_env.create_vagrant_env ui_class: Vagrant::UI::Basic } let(:machine) { iso_env.machine(:test, :libvirt) } # Mock the communicator to prevent SSH commands for being executed. let(:communicator) { double('communicator') } # Mock the guest operating system. 
let(:guest) { double('guest') } let(:app) { ->(env) {} } let(:plugin) { register_plugin } before (:each) do allow(machine).to receive(:guest).and_return(guest) allow(machine).to receive(:communicate).and_return(communicator) end end vagrant-libvirt-0.7.0/spec/support/temporary_dir.rb000066400000000000000000000003161414232526500225100ustar00rootroot00000000000000# frozen_string_literal: true shared_context 'temporary_dir' do around do |example| Dir.mktmpdir("rspec-") do |dir| @temp_dir = dir example.run end end attr_reader :temp_dir end vagrant-libvirt-0.7.0/spec/unit/000077500000000000000000000000001414232526500165465ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/unit/action/000077500000000000000000000000001414232526500200235ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/unit/action/clean_machine_folder_spec.rb000066400000000000000000000033731414232526500254710ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'vagrant-libvirt/action/clean_machine_folder' describe VagrantPlugins::ProviderLibvirt::Action::CleanMachineFolder do subject { described_class.new(app, env) } include_context 'unit' describe '#call' do before do FileUtils.touch(File.join(machine.data_dir, "box.meta")) end context 'with default options' do it 'should verbosely remove the folder' do expect(ui).to receive(:info).with('Deleting the machine folder') expect(subject.call(env)).to be_nil expect(File.exists?(machine.data_dir)).to eq(true) expect(Dir.entries(machine.data_dir)).to match_array([".", ".."]) end end context 'when the data dir doesn\'t exist' do before do Dir.mktmpdir do |d| # returns a temporary directory that has been already deleted when running expect(machine).to receive(:data_dir).and_return(d.to_s).exactly(3).times end end it 'should remove the folder' do expect(ui).to receive(:info).with('Deleting the machine folder') expect(subject.call(env)).to be_nil expect(File.exists?(machine.data_dir)).to eq(true) expect(Dir.entries(machine.data_dir)).to match_array([".", ".."]) end end context 'with quiet option enabled' do subject { described_class.new(app, env, {:quiet => true}) } it 'should quietly remove the folder' do expect(ui).to_not receive(:info).with('Deleting the machine folder') expect(subject.call(env)).to be_nil expect(File.exists?(machine.data_dir)).to eq(true) expect(Dir.entries(machine.data_dir)).to match_array([".", ".."]) end end end end vagrant-libvirt-0.7.0/spec/unit/action/create_domain_spec.rb000066400000000000000000000155261414232526500241650ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/errors' require 'vagrant-libvirt/util/byte_number' require 'vagrant-libvirt/action/create_domain' describe VagrantPlugins::ProviderLibvirt::Action::CreateDomain do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' let(:libvirt_client) { double('libvirt_client') } let(:servers) { double('servers') } let(:volumes) { double('volumes') } let(:domain_volume) { double('domain_volume') } let(:domain_xml) { File.read(File.join(File.dirname(__FILE__), File.basename(__FILE__, '.rb'), domain_xml_file)) } let(:storage_pool_xml) { File.read(File.join(File.dirname(__FILE__), File.basename(__FILE__, '.rb'), storage_pool_xml_file)) } let(:libvirt_storage_pool) { double('storage_pool') } describe '#call' do before do 
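# stub the provider driver's connection so no real libvirt socket is touched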
allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:connection).and_return(connection) allow(connection).to receive(:client).and_return(libvirt_client) allow(connection).to receive(:servers).and_return(servers) allow(connection).to receive(:volumes).and_return(volumes) allow(volumes).to receive(:all).and_return([domain_volume]) allow(domain_volume).to receive(:pool_name).and_return('default') allow(domain_volume).to receive(:[]).with('name').and_return('vagrant-test_default.img') allow(domain_volume).to receive(:path).and_return('/var/lib/libvirt/images/vagrant-test_default.img') allow(machine).to receive_message_chain("box.name") { 'vagrant-libvirt/test' } allow(logger).to receive(:info) allow(logger).to receive(:debug) allow(ui).to receive(:info) env[:domain_name] = "vagrant-test_default" env[:box_volumes] = [] env[:box_volumes].push({ :path=>"/test/box.img", :name=>"test_vagrant_box_image_1.1.1_0.img", :virtual_size=> ByteNumber.new(5), }) # should be ignored for system session and used for user session allow(Process).to receive(:uid).and_return(9999) allow(Process).to receive(:gid).and_return(9999) end context 'connection => qemu:///system' do let(:domain_xml_file) { 'default_domain.xml' } context 'default pool' do it 'should execute correctly' do expect(servers).to receive(:create).with(xml: domain_xml).and_return(machine) expect(volumes).to_not receive(:create) # additional disks only expect(subject.call(env)).to be_nil end context 'with no box' do let(:storage_pool_xml_file) { 'default_system_storage_pool.xml' } let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test end EOF end it 'should query for the storage pool path' do expect(libvirt_client).to receive(:lookup_storage_pool_by_name).and_return(libvirt_storage_pool) expect(libvirt_storage_pool).to receive(:xml_desc).and_return(storage_pool_xml) expect(servers).to receive(:create).and_return(machine) expect(subject.call(env)).to be_nil end end context 'additional disks' do let(:vagrantfile_providerconfig) do <<-EOF libvirt.storage :file, :size => '20G' EOF end context 'volume create failed' do it 'should raise an exception' do expect(volumes).to receive(:create).and_raise(Libvirt::Error) expect{ subject.call(env) }.to raise_error(VagrantPlugins::ProviderLibvirt::Errors::FogCreateDomainVolumeError) end end context 'volume create succeeded' do let(:domain_xml_file) { 'additional_disks_domain.xml' } it 'should complete' do expect(volumes).to receive(:create).with( hash_including( :path => "/var/lib/libvirt/images/vagrant-test_default-vdb.qcow2", :owner => 0, :group => 0, :pool_name => "default", ) ) expect(servers).to receive(:create).with(xml: domain_xml).and_return(machine) expect(subject.call(env)).to be_nil end end end end context 'no default pool' do let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test end EOF end it 'should raise an exception' do expect(libvirt_client).to receive(:lookup_storage_pool_by_name).and_return(nil) expect{ subject.call(env) }.to raise_error(VagrantPlugins::ProviderLibvirt::Errors::NoStoragePool) end end end context 'connection => qemu:///session' do let(:vagrantfile_providerconfig) do <<-EOF libvirt.qemu_use_session = true EOF end context 'default pool' do it 'should execute correctly' do expect(servers).to receive(:create).and_return(machine) expect(subject.call(env)).to be_nil end context 'with no box' do let(:storage_pool_xml_file) { 'default_user_storage_pool.xml' } let(:vagrantfile) do <<-EOF 
Vagrant.configure('2') do |config| config.vm.define :test config.vm.provider :libvirt do |libvirt| #{vagrantfile_providerconfig} end end EOF end it 'should query for the storage pool path' do expect(libvirt_client).to receive(:lookup_storage_pool_by_name).and_return(libvirt_storage_pool) expect(libvirt_storage_pool).to receive(:xml_desc).and_return(storage_pool_xml) expect(servers).to receive(:create).and_return(machine) expect(subject.call(env)).to be_nil end end context 'additional disks' do let(:vagrantfile_providerconfig) do <<-EOF libvirt.qemu_use_session = true libvirt.storage :file, :size => '20G' EOF end context 'volume create succeeded' do it 'should complete' do expect(volumes).to receive(:create).with( hash_including( :path => "/var/lib/libvirt/images/vagrant-test_default-vdb.qcow2", :owner => 9999, :group => 9999, :pool_name => "default", ) ) expect(servers).to receive(:create).and_return(machine) expect(subject.call(env)).to be_nil end end end end end end end vagrant-libvirt-0.7.0/spec/unit/action/create_domain_spec/000077500000000000000000000000001414232526500236275ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/unit/action/create_domain_spec/additional_disks_domain.xml000066400000000000000000000026221414232526500312070ustar00rootroot00000000000000 vagrant-test_default Source: /rootpath/Vagrantfile 524288 1 hvm vagrant-libvirt-0.7.0/spec/unit/action/create_domain_spec/default_domain.xml000066400000000000000000000022141414232526500273230ustar00rootroot00000000000000 vagrant-test_default Source: /rootpath/Vagrantfile 524288 1 hvm vagrant-libvirt-0.7.0/spec/unit/action/create_domain_spec/default_system_storage_pool.xml000066400000000000000000000006751414232526500321660ustar00rootroot00000000000000 default 434e1b75-4a72-45d7-8a98-ebd90c125d22 10737418240 10737418240 10737418240 /var/lib/libvirt/images 0755 0 0 vagrant-libvirt-0.7.0/spec/unit/action/create_domain_spec/default_user_storage_pool.xml000066400000000000000000000006751414232526500316200ustar00rootroot00000000000000 default 434e1b75-4a72-45d7-8a98-ebd90c125d22 10737418240 10737418240 10737418240 /var/lib/libvirt/images 0755 0 0 vagrant-libvirt-0.7.0/spec/unit/action/create_domain_volume_spec.rb000066400000000000000000000071761414232526500255560ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/action/destroy_domain' require 'vagrant-libvirt/util/byte_number' describe VagrantPlugins::ProviderLibvirt::Action::CreateDomainVolume do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' let(:libvirt_domain) { double('libvirt_domain') } let(:libvirt_client) { double('libvirt_client') } let(:volumes) { double('volumes') } let(:all) { double('all') } let(:box_volume) { double('box_volume') } def read_test_file(name) File.read(File.join(File.dirname(__FILE__), File.basename(__FILE__, '.rb'), name)) end describe '#call' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:connection).and_return(connection) allow(connection).to receive(:client).and_return(libvirt_client) allow(connection).to receive(:volumes).and_return(volumes) allow(volumes).to receive(:all).and_return(all) allow(all).to receive(:first).and_return(box_volume) allow(box_volume).to receive(:id).and_return(nil) env[:domain_name] = 'test' allow(logger).to receive(:debug) end context 'when one disk' do before do allow(box_volume).to 
receive(:path).and_return('/test/path_0.img') env[:box_volumes] = [ { :name=>"test_vagrant_box_image_1.1.1_0.img", :virtual_size=>ByteNumber.new(5368709120) } ] end it 'should create one disk in storage' do expected_xml = read_test_file('one_disk_in_storage.xml') expect(ui).to receive(:info).with('Creating image (snapshot of base box volume).') expect(logger).to receive(:debug).with('Using pool default for base box snapshot') expect(volumes).to receive(:create).with( :xml => expected_xml, :pool_name => "default" ) expect(subject.call(env)).to be_nil end end context 'when three disks' do before do allow(box_volume).to receive(:path).and_return( '/test/path_0.img', '/test/path_1.img', '/test/path_2.img', ) env[:box_volumes] = [ { :name=>"test_vagrant_box_image_1.1.1_0.img", :virtual_size=>ByteNumber.new(5368709120) }, { :name=>"test_vagrant_box_image_1.1.1_1.img", :virtual_size=>ByteNumber.new(10737423360) }, { :name=>"test_vagrant_box_image_1.1.1_2.img", :virtual_size=>ByteNumber.new(21474836480) } ] end it 'should create three disks in storage' do expect(ui).to receive(:info).with('Creating image (snapshot of base box volume).') expect(logger).to receive(:debug).with('Using pool default for base box snapshot') expect(volumes).to receive(:create).with( :xml => read_test_file('three_disks_in_storage_disk_0.xml'), :pool_name => "default" ) expect(logger).to receive(:debug).with('Using pool default for base box snapshot') expect(volumes).to receive(:create).with( :xml => read_test_file('three_disks_in_storage_disk_1.xml'), :pool_name => "default" ) expect(logger).to receive(:debug).with('Using pool default for base box snapshot') expect(volumes).to receive(:create).with( :xml => read_test_file('three_disks_in_storage_disk_2.xml'), :pool_name => "default" ) expect(subject.call(env)).to be_nil end end end end vagrant-libvirt-0.7.0/spec/unit/action/create_domain_volume_spec/000077500000000000000000000000001414232526500252165ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/unit/action/create_domain_volume_spec/one_disk_in_storage.xml000066400000000000000000000007421414232526500317500ustar00rootroot00000000000000 test.img 5368709120 0 0 /test/path_0.img 0 0 vagrant-libvirt-0.7.0/spec/unit/action/create_domain_volume_spec/three_disks_in_storage_disk_0.xml000066400000000000000000000007421414232526500337120ustar00rootroot00000000000000 test.img 5368709120 0 0 /test/path_0.img 0 0 vagrant-libvirt-0.7.0/spec/unit/action/create_domain_volume_spec/three_disks_in_storage_disk_1.xml000066400000000000000000000007451414232526500337160ustar00rootroot00000000000000 test_1.img 10737423360 0 0 /test/path_1.img 0 0 vagrant-libvirt-0.7.0/spec/unit/action/create_domain_volume_spec/three_disks_in_storage_disk_2.xml000066400000000000000000000007451414232526500337170ustar00rootroot00000000000000 test_2.img 21474836480 0 0 /test/path_2.img 0 0 vagrant-libvirt-0.7.0/spec/unit/action/destroy_domain_spec.rb000066400000000000000000000065031414232526500244060ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/action/destroy_domain' describe VagrantPlugins::ProviderLibvirt::Action::DestroyDomain do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' let(:driver) { double('driver') } let(:libvirt_domain) { double('libvirt_domain') } let(:libvirt_client) { double('libvirt_client') } let(:servers) { double('servers') } before do allow(machine.provider).to 
receive('driver').and_return(driver) allow(driver).to receive(:connection).and_return(connection) end describe '#call' do before do allow(connection).to receive(:client).and_return(libvirt_client) allow(libvirt_client).to receive(:lookup_domain_by_uuid) .and_return(libvirt_domain) allow(connection).to receive(:servers).and_return(servers) allow(servers).to receive(:get).and_return(domain) # always see this at the start of #call expect(ui).to receive(:info).with('Removing domain...') end context 'when no snapshots' do let(:root_disk) { double('libvirt_root_disk') } before do allow(libvirt_domain).to receive(:list_snapshots).and_return([]) allow(libvirt_domain).to receive(:has_managed_save?).and_return(nil) allow(root_disk).to receive(:name).and_return('test.img') end context 'when only has root disk' do it 'calls fog to destroy volumes' do expect(domain).to receive(:destroy).with(destroy_volumes: true) expect(subject.call(env)).to be_nil end end context 'when has additional disks' do let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test config.vm.provider :libvirt do |libvirt| libvirt.storage :file end end EOF end let(:extra_disk) { double('libvirt_extra_disk') } before do allow(extra_disk).to receive(:name).and_return('test-vdb.qcow2') end it 'destroys disks individually' do allow(libvirt_domain).to receive(:name).and_return('test') allow(domain).to receive(:volumes).and_return([extra_disk], [root_disk]) expect(domain).to receive(:destroy).with(destroy_volumes: false) expect(extra_disk).to receive(:destroy) # extra disk remove expect(root_disk).to receive(:destroy) # root disk remove expect(subject.call(env)).to be_nil end end context 'when has CDROMs attached' do let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :device => :cdrom end end EOF end it 'uses explicit removal of disks' do allow(libvirt_domain).to receive(:name).and_return('test') allow(domain).to receive(:volumes).and_return([root_disk, nil]) expect(domain).to_not receive(:destroy).with(destroy_volumes: true) expect(root_disk).to receive(:destroy) # root disk remove expect(subject.call(env)).to be_nil end end end end end vagrant-libvirt-0.7.0/spec/unit/action/forward_ports_spec.rb000066400000000000000000000164771414232526500242740ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/errors' require 'vagrant-libvirt/action/forward_ports' describe VagrantPlugins::ProviderLibvirt::Action::ForwardPorts do subject { described_class.new(app, env) } include_context 'unit' let(:machine_config) { double("machine_config") } let(:vm_config) { double("vm_config") } let(:provider_config) { double("provider_config") } before (:each) do allow(machine).to receive(:config).and_return(machine_config) allow(machine).to receive(:provider_config).and_return(provider_config) allow(machine_config).to receive(:vm).and_return(vm_config) allow(vm_config).to receive(:networks).and_return([]) allow(provider_config).to receive(:forward_ssh_port).and_return(false) end describe '#call' do context 'with none defined' do it 'should skip calling forward_ports' do expect(subject).to_not receive(:forward_ports) expect(subject.call(env)).to be_nil end end context 'with network including one forwarded port' do let(:networks) { [ [:private_network, {:ip=>"10.20.30.40", :protocol=>"tcp", 
:id=>"6b8175ed-3220-4b63-abaf-0bb8d7cdd723"}], [:forwarded_port, port_options], ]} let(:port_options){ {guest: 80, host: 8080} } it 'should compile a single port forward to set up' do expect(vm_config).to receive(:networks).and_return(networks) expect(ui).to_not receive(:warn) expect(subject).to receive(:forward_ports).and_return(nil) expect(subject.call(env)).to be_nil expect(env[:forwarded_ports]).to eq([networks[1][1]]) end context 'when host port in protected range' do let(:port_options){ {guest: 8080, host: 80} } it 'should emit a warning' do expect(vm_config).to receive(:networks).and_return(networks) expect(ui).to receive(:warn).with(include("You are trying to forward to privileged ports")) expect(subject).to receive(:forward_ports).and_return(nil) expect(subject.call(env)).to be_nil end end end context 'when udp protocol is selected' do let(:port_options){ {guest: 80, host: 8080, protocol: "udp"} } it 'should skip and emit warning' do expect(vm_config).to receive(:networks).and_return([[:forwarded_port, port_options]]) expect(ui).to receive(:warn).with("Forwarding UDP ports is not supported. Ignoring.") expect(subject).to_not receive(:forward_ports) expect(subject.call(env)).to be_nil end end context 'when default ssh port forward provided' do let(:networks){ [ [:private_network, {:ip=>"10.20.30.40", :protocol=>"tcp", :id=>"6b8175ed-3220-4b63-abaf-0bb8d7cdd723"}], [:forwarded_port, {guest: 80, host: 8080}], [:forwarded_port, {guest: 22, host: 2222, host_ip: '127.0.0.1', id: 'ssh'}], ]} context 'with default config' do it 'should not forward the ssh port' do expect(vm_config).to receive(:networks).and_return(networks) expect(subject).to receive(:forward_ports) expect(subject.call(env)).to be_nil expect(env[:forwarded_ports]).to eq([networks[1][1]]) end end context 'with forward_ssh_port enabled' do before do allow(provider_config).to receive(:forward_ssh_port).and_return(true) end it 'should forward the port' do expect(vm_config).to receive(:networks).and_return(networks) expect(subject).to receive(:forward_ports) expect(subject.call(env)).to be_nil expect(env[:forwarded_ports]).to eq(networks.drop(1).map { |_, opts| opts }) end end end end describe '#forward_ports' do let(:pid_dir){ machine.data_dir.join('pids') } before (:each) do allow(env).to receive(:[]).and_call_original allow(machine).to receive(:ssh_info).and_return( { :host => "localhost", :username => "vagrant", :port => 22, :private_key_path => ["/home/test/.ssh/id_rsa"], } ) allow(provider_config).to receive(:proxy_command).and_return(nil) end context 'with port to forward' do let(:port_options){ {guest: 80, host: 8080, guest_ip: "192.168.1.121"} } it 'should spawn ssh to setup forwarding' do expect(env).to receive(:[]).with(:forwarded_ports).and_return([port_options]) expect(ui).to receive(:info).with("#{port_options[:guest]} (guest) => #{port_options[:host]} (host) (adapter eth0)") expect(subject).to receive(:spawn).with(/ssh -n -o User=vagrant -o Port=22.*-L \*:8080:192.168.1.121:80 -N localhost/, anything).and_return(9999) expect(subject.forward_ports(env)).to eq([port_options]) expect(pid_dir.join('ssh_8080.pid')).to have_file_content("9999") end end context 'with privileged host port' do let(:port_options){ {guest: 80, host: 80, guest_ip: "192.168.1.121"} } it 'should spawn ssh to setup forwarding' do expect(env).to receive(:[]).with(:forwarded_ports).and_return([port_options]) expect(ui).to receive(:info).with("#{port_options[:guest]} (guest) => #{port_options[:host]} (host) (adapter eth0)") expect(ui).to 
receive(:info).with('Requesting sudo for host port(s) <= 1024') expect(subject).to receive(:system).with('sudo -v').and_return(true) expect(subject).to receive(:spawn).with(/sudo ssh -n -o User=vagrant -o Port=22.*-L \*:80:192.168.1.121:80 -N localhost/, anything).and_return(10000) expect(subject.forward_ports(env)).to eq([port_options]) expect(pid_dir.join('ssh_80.pid')).to have_file_content("10000") end end end end describe VagrantPlugins::ProviderLibvirt::Action::ClearForwardedPorts do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' describe '#call' do context 'no forwarded ports' do it 'should skip checking if pids are running' do expect(subject).to_not receive(:ssh_pid?) expect(logger).to receive(:info).with('No ssh pids found') expect(subject.call(env)).to be_nil end end context 'multiple forwarded ports' do before do data_dir = machine.data_dir.join('pids') data_dir.mkdir unless data_dir.directory? [ {:port => '8080', :pid => '10001'}, {:port => '8081', :pid => '10002'}, ].each do |port_pid| File.write(data_dir.to_s + "/ssh_#{port_pid[:port]}.pid", port_pid[:pid]) end end it 'should terminate each of the processes' do expect(logger).to receive(:info).with(no_args) # don't know how to test translations from vagrant expect(subject).to receive(:ssh_pid?).with("10001").and_return(true) expect(subject).to receive(:ssh_pid?).with("10002").and_return(true) expect(logger).to receive(:debug).with(/Killing pid/).twice() expect(logger).to receive(:info).with('Removing ssh pid files') expect(subject).to receive(:system).with("kill 10001") expect(subject).to receive(:system).with("kill 10002") expect(subject.call(env)).to be_nil expect(Dir.entries(machine.data_dir.join('pids'))).to match_array(['.', '..']) end end end end vagrant-libvirt-0.7.0/spec/unit/action/halt_domain_spec.rb000066400000000000000000000033361414232526500236460ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/action/halt_domain' describe VagrantPlugins::ProviderLibvirt::Action::HaltDomain do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' let(:driver) { double('driver') } let(:libvirt_domain) { double('libvirt_domain') } let(:servers) { double('servers') } before do allow(machine.provider).to receive('driver').and_return(driver) allow(driver).to receive(:created?).and_return(true) allow(driver).to receive(:connection).and_return(connection) end describe '#call' do before do allow(connection).to receive(:servers).and_return(servers) allow(servers).to receive(:get).and_return(domain) allow(ui).to receive(:info).with('Halting domain...') end context "when state is not running" do before { expect(driver).to receive(:state).at_least(1). 
and_return(:not_created) } it "should not poweroff when state is not running" do expect(domain).not_to receive(:poweroff) subject.call(env) end it "should not print halting message" do expect(ui).not_to receive(:info) subject.call(env) end end context "when state is running" do before do expect(driver).to receive(:state).and_return(:running) end it "should poweroff" do expect(domain).to receive(:poweroff) subject.call(env) end it "should print halting message" do allow(domain).to receive(:poweroff) expect(ui).to receive(:info).with('Halting domain...') subject.call(env) end end end end vagrant-libvirt-0.7.0/spec/unit/action/handle_box_image_spec.rb000066400000000000000000000417331414232526500246370ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'json' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/action/destroy_domain' require 'vagrant-libvirt/util/byte_number' describe VagrantPlugins::ProviderLibvirt::Action::HandleBoxImage do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' let(:libvirt_client) { double('libvirt_client') } let(:volumes) { double('volumes') } let(:all) { double('all') } let(:box_volume) { double('box_volume') } let(:fog_volume) { double('fog_volume') } let(:config) { double('config') } qemu_json_return_5G = JSON.dump({ "virtual-size": 5368709120, "filename": "/test/box.img", "cluster-size": 65536, "format": "qcow2", "actual-size": 655360, "dirty-flag": false }) byte_number_5G = ByteNumber.new(5368709120) qemu_json_return_10G = JSON.dump({ "virtual-size": 10737423360, "filename": "/test/disk.qcow2", "cluster-size": 65536, "format": "qcow2", "actual-size": 655360, "dirty-flag": false }) byte_number_10G = ByteNumber.new(10737423360) qemu_json_return_20G = JSON.dump({ "virtual-size": 21474836480, "filename": "/test/box_2.img", "cluster-size": 65536, "format": "qcow2", "actual-size": 1508708352, "dirty-flag": false }) byte_number_20G = ByteNumber.new(21474836480) describe '#call' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:connection).and_return(connection) allow(connection).to receive(:client).and_return(libvirt_client) allow(connection).to receive(:volumes).and_return(volumes) allow(volumes).to receive(:all).and_return(all) allow(env[:ui]).to receive(:clear_line) end context 'when one disk in metadata.json' do before do allow(all).to receive(:first).and_return(box_volume) allow(box_volume).to receive(:id).and_return(1) allow(env[:machine]).to receive_message_chain("box.name") { 'test' } allow(env[:machine]).to receive_message_chain("box.version") { '1.1.1' } allow(env[:machine]).to receive_message_chain("box.metadata") { Hash[ 'virtual_size'=> 5, 'format' => 'qcow2' ] } allow(env[:machine]).to receive_message_chain("box.directory.join") do |arg| '/test/' + arg.to_s end end it 'should have one disk in machine env' do expect(subject.call(env)).to be_nil expect(env[:box_volume_number]).to eq(1) expect(env[:box_volumes]).to eq( [ { :path=>"/test/box.img", :name=>"test_vagrant_box_image_1.1.1_box.img", :virtual_size=>byte_number_5G, :format=>"qcow2" } ] ) end context 'when no box version set' do let(:box_mtime) { Time.now } before do expect(env[:machine]).to receive_message_chain("box.version") { nil } expect(File).to receive(:mtime).and_return(box_mtime) end it 'should use the box file timestamp' do expect(ui).to receive(:warn).with( "No verison detected for test, using timestamp to watch for modifications. 
Consider\n" + "generating a local metadata for the box with a version to allow better handling.\n" + 'See https://www.vagrantup.com/docs/boxes/format#box-metadata for further details.' ) expect(subject.call(env)).to be_nil expect(env[:box_volume_number]).to eq(1) expect(env[:box_volumes]).to eq( [ { :path=>"/test/box.img", :name=>"test_vagrant_box_image_0_#{box_mtime.to_i}_box.img", :virtual_size=>byte_number_5G, :format=>"qcow2" } ] ) end end context 'When config.machine_virtual_size is set and smaller than box_virtual_size' do before do allow(env[:machine]).to receive_message_chain("provider_config.machine_virtual_size").and_return(1) end it 'should warning must be raise' do expect(ui).to receive(:warn).with("Ignoring requested virtual disk size of '1' as it is below\nthe minimum box image size of '5'.") expect(subject.call(env)).to be_nil expect(env[:box_volumes]).to eq( [ { :path=>"/test/box.img", :name=>"test_vagrant_box_image_1.1.1_box.img", :virtual_size=>byte_number_5G, :format=>"qcow2" } ] ) end end context 'When config.machine_virtual_size is set and higher than box_virtual_size' do before do allow(env[:machine]).to receive_message_chain("provider_config.machine_virtual_size").and_return(20) end it 'should be use' do expect(ui).to receive(:info).with("Created volume larger than box defaults, will require manual resizing of\nfilesystems to utilize.") expect(subject.call(env)).to be_nil expect(env[:box_volumes]).to eq( [ { :path=>"/test/box.img", :name=>"test_vagrant_box_image_1.1.1_box.img", :virtual_size=>byte_number_20G, :format=>"qcow2" } ] ) end end context 'when disk image not in storage pool' do before do allow(File).to receive(:exist?).and_return(true) allow(File).to receive(:size).and_return(5*1024*1024*1024) allow(all).to receive(:first).and_return(nil) allow(subject).to receive(:upload_image).and_return(true) allow(volumes).to receive(:create).and_return(fog_volume) end it 'should upload disk' do expect(ui).to receive(:info).with('Uploading base box image as volume into Libvirt storage...') expect(logger).to receive(:info).with('Creating volume test_vagrant_box_image_1.1.1_box.img in storage pool default.') expect(volumes).to receive(:create).with( hash_including( :name => "test_vagrant_box_image_1.1.1_box.img", :allocation => "5120M", :capacity => "5368709120B", ) ) expect(subject).to receive(:upload_image) expect(subject.call(env)).to be_nil end end context 'when disk image already in storage pool' do before do allow(all).to receive(:first).and_return(box_volume) allow(box_volume).to receive(:id).and_return(1) end it 'should skip disk upload' do expect(volumes).not_to receive(:create) expect(subject).not_to receive(:upload_image) expect(subject.call(env)).to be_nil end end end context 'when three disks in metadata.json' do let(:status) { double } before do allow(all).to receive(:first).and_return(box_volume) allow(box_volume).to receive(:id).and_return(1) allow(env[:machine]).to receive_message_chain("box.name") { 'test' } allow(env[:machine]).to receive_message_chain("box.version") { '1.1.1' } allow(env[:machine]).to receive_message_chain("box.metadata") { Hash[ 'disks' => [ { 'path' => 'box.img', 'name' => 'send_box_name', }, { 'path' => 'disk.qcow2', }, { 'path' => 'box_2.img', }, ], ]} allow(env[:machine]).to receive_message_chain("box.directory.join") do |arg| '/test/' + arg.to_s end allow(status).to receive(:success?).and_return(true) allow(Open3).to receive(:capture3).with('qemu-img', 'info', '--output=json', '/test/box.img').and_return([ qemu_json_return_5G, 
"", status ]) allow(Open3).to receive(:capture3).with('qemu-img', 'info', '--output=json', '/test/disk.qcow2').and_return([ qemu_json_return_10G, "", status ]) allow(Open3).to receive(:capture3).with('qemu-img', 'info', '--output=json', '/test/box_2.img').and_return([ qemu_json_return_20G, "", status ]) end it 'should have three disks in machine env' do expect(subject.call(env)).to be_nil expect(env[:box_volume_number]).to eq(3) expect(env[:box_volumes]).to eq( [ { :path=>"/test/box.img", :name=>"test_vagrant_box_image_1.1.1_send_box_name.img", :virtual_size=>byte_number_5G, :format=>"qcow2" }, { :path=>"/test/disk.qcow2", :name=>"test_vagrant_box_image_1.1.1_disk.img", :virtual_size=>byte_number_10G, :format=>"qcow2" }, { :path=>"/test/box_2.img", :name=>"test_vagrant_box_image_1.1.1_box_2.img", :virtual_size=>byte_number_20G, :format=>"qcow2" } ] ) end context 'when none of the disks in storage pool' do before do allow(File).to receive(:exist?).and_return(true) allow(File).to receive(:size).and_return(5*1024*1024*1024, 10*1024*1024*1024, 20*1024*1024*1024) allow(all).to receive(:first).and_return(nil) allow(subject).to receive(:upload_image).and_return(true) allow(volumes).to receive(:create).and_return(fog_volume) end it 'should upload all 3 disks' do expect(ui).to receive(:info).with('Uploading base box image as volume into Libvirt storage...') expect(logger).to receive(:info).with('Creating volume test_vagrant_box_image_1.1.1_send_box_name.img in storage pool default.') expect(volumes).to receive(:create).with( hash_including( :name => "test_vagrant_box_image_1.1.1_send_box_name.img", :allocation => "5120M", :capacity => "5368709120B", ) ) expect(subject).to receive(:upload_image) expect(ui).to receive(:info).with('Uploading base box image as volume into Libvirt storage...') expect(logger).to receive(:info).with('Creating volume test_vagrant_box_image_1.1.1_disk.img in storage pool default.') expect(volumes).to receive(:create).with( hash_including( :name => "test_vagrant_box_image_1.1.1_disk.img", :allocation => "10240M", :capacity => "10737423360B", ) ) expect(subject).to receive(:upload_image) expect(ui).to receive(:info).with('Uploading base box image as volume into Libvirt storage...') expect(logger).to receive(:info).with('Creating volume test_vagrant_box_image_1.1.1_box_2.img in storage pool default.') expect(volumes).to receive(:create).with( hash_including( :name => "test_vagrant_box_image_1.1.1_box_2.img", :allocation => "20480M", :capacity => "21474836480B", ) ) expect(subject).to receive(:upload_image) expect(subject.call(env)).to be_nil end end context 'when only disk 0 in storage pool' do before do allow(File).to receive(:exist?).and_return(true) allow(File).to receive(:size).and_return(10*1024*1024*1024, 20*1024*1024*1024) allow(all).to receive(:first).and_return(box_volume, nil, nil) allow(box_volume).to receive(:id).and_return(1) allow(subject).to receive(:upload_image).and_return(true) allow(volumes).to receive(:create).and_return(fog_volume) end it 'upload disks 1 and 2 only' do expect(ui).to receive(:info).with('Uploading base box image as volume into Libvirt storage...') expect(logger).to receive(:info).with('Creating volume test_vagrant_box_image_1.1.1_disk.img in storage pool default.') expect(volumes).to receive(:create).with(hash_including(:name => "test_vagrant_box_image_1.1.1_disk.img")) expect(subject).to receive(:upload_image) expect(ui).to receive(:info).with('Uploading base box image as volume into Libvirt storage...') expect(logger).to 
receive(:info).with('Creating volume test_vagrant_box_image_1.1.1_box_2.img in storage pool default.') expect(volumes).to receive(:create).with(hash_including(:name => "test_vagrant_box_image_1.1.1_box_2.img")) expect(subject).to receive(:upload_image) expect(subject.call(env)).to be_nil end end context 'when has all disks on storage pool' do before do allow(all).to receive(:first).and_return(box_volume) allow(box_volume).to receive(:id).and_return(1) end it 'should skip disk upload' do expect(ui).not_to receive(:info).with('Uploading base box image as volume into Libvirt storage...') expect(volumes).not_to receive(:create) expect(subject).not_to receive(:upload_image) expect(subject.call(env)).to be_nil end end end context 'when wrong box format in metadata.json' do before do allow(all).to receive(:first).and_return(box_volume) allow(box_volume).to receive(:id).and_return(1) allow(env[:machine]).to receive_message_chain("box.name") { 'test' } allow(env[:machine]).to receive_message_chain("box.version") { '1.1.1' } allow(env[:machine]).to receive_message_chain("box.metadata") { Hash[ 'virtual_size'=> 5, 'format' => 'wrongFormat' ] } allow(env[:machine]).to receive_message_chain("box.directory.join") do |arg| '/test/' + arg.to_s end end it 'should raise WrongBoxFormatSet exception' do expect{ subject.call(env) }.to raise_error(VagrantPlugins::ProviderLibvirt::Errors::WrongBoxFormatSet) end end context 'when invalid format in metadata.json' do let(:status) { double } before do allow(all).to receive(:first).and_return(box_volume) allow(box_volume).to receive(:id).and_return(1) allow(env[:machine]).to receive_message_chain("box.name") { 'test' } allow(env[:machine]).to receive_message_chain("box.version") { '1.1.1' } allow(env[:machine]).to receive_message_chain("box.metadata") { box_metadata } allow(env[:machine]).to receive_message_chain("box.directory.join") do |arg| '/test/' + arg.to_s end allow(status).to receive(:success?).and_return(true) allow(Open3).to receive(:capture3).with('qemu-img', 'info', "--output=json", '/test/box.img').and_return([ qemu_json_return_5G, "", status ]) allow(Open3).to receive(:capture3).with('qemu-img', 'info', "--output=json", '/test/disk.qcow2').and_return([ qemu_json_return_10G, "", status ]) allow(Open3).to receive(:capture3).with('qemu-img', 'info', "--output=json", '/test/box_2.img').and_return([ qemu_json_return_20G, "", status ]) end context 'with one disk having wrong disk format' do let(:box_metadata) { Hash[ 'disks' => [ { 'path' => 'box.img', 'name' =>'send_box_name.img', 'format' => 'wrongFormat' }, { 'path' => 'disk.qcow2', }, { 'path' => 'box_2.img', }, ], ] } it 'should be ignored' do expect(subject.call(env)).to be_nil end end context 'with one disk missing path' do let(:box_metadata) { Hash[ 'disks' => [ { 'path' => 'box.img', }, { 'name' => 'send_box_name', }, { 'path' => 'box_2.img', }, ], ] } it 'should raise an exception' do expect{ subject.call(env) }.to raise_error(VagrantPlugins::ProviderLibvirt::Errors::BoxFormatMissingAttribute, /: 'disks\[1\]\['path'\]'/) end end context 'with one disk name duplicating a path of another' do let(:box_metadata) { Hash[ 'disks' => [ { 'path' => 'box.img', 'name' => 'box_2', }, { 'path' => 'disk.qcow2', }, { 'path' => 'box_2.img', }, ], ] } it 'should raise an exception' do expect{ subject.call(env) }.to raise_error(VagrantPlugins::ProviderLibvirt::Errors::BoxFormatDuplicateVolume, /test_vagrant_box_image_1.1.1_box_2.img.*'disks\[2\]'.*'disks\[0\]'/) end end end end end 
vagrant-libvirt-0.7.0/spec/unit/action/package_domain_spec.rb000066400000000000000000000266621414232526500243200ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'vagrant-libvirt/action/clean_machine_folder' describe VagrantPlugins::ProviderLibvirt::Action::PackageDomain do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' include_context 'temporary_dir' let(:libvirt_client) { double('libvirt_client') } let(:libvirt_domain) { double('libvirt_domain') } let(:servers) { double('servers') } let(:volumes) { double('volumes') } let(:metadata_file) { double('file') } let(:vagrantfile_file) { double('file') } describe '#call' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:connection).and_return(connection) allow(connection).to receive(:client).and_return(libvirt_client) allow(libvirt_client).to receive(:lookup_domain_by_uuid).and_return(libvirt_domain) allow(connection).to receive(:servers).and_return(servers) allow(servers).to receive(:get).and_return(domain) allow(connection).to receive(:volumes).and_return(volumes) allow(logger).to receive(:info) env["package.directory"] = temp_dir end context 'with defaults' do let(:root_disk) { double('libvirt_domain_disk') } before do allow(root_disk).to receive(:name).and_return('default_domain.img') allow(domain).to receive(:volumes).and_return([root_disk]) allow(libvirt_domain).to receive(:name).and_return('default_domain') allow(subject).to receive(:download_image).and_return(true) end it 'should succeed' do expect(ui).to receive(:info).with('Packaging domain...') expect(ui).to receive(:info).with(/Downloading default_domain.img to .*\/box.img/) expect(ui).to receive(:info).with('Image has backing image, copying image and rebasing ...') expect(subject).to receive(:`).with(/qemu-img info .*\/box.img | grep 'backing file:' | cut -d ':' -f2/).and_return("some image") expect(subject).to receive(:`).with(/qemu-img rebase -p -b "" .*\/box.img/) expect(subject).to receive(:`).with(/virt-sysprep --no-logfile --operations .* -a .*\/box.img .*/) expect(subject).to receive(:`).with(/virt-sparsify --in-place .*\/box.img/) expect(subject).to receive(:`).with(/qemu-img info --output=json .*\/box.img/).and_return( { 'virtual-size': 5*1024*1024*1024 }.to_json ) expect(File).to receive(:write).with( /.*\/metadata.json/, <<-EOF.unindent { "provider": "libvirt", "format": "qcow2", "virtual_size": 5 } EOF ) expect(File).to receive(:write).with(/.*\/Vagrantfile/, /.*/) expect(subject.call(env)).to be_nil end end context 'with nil volume' do let(:root_disk) { double('libvirt_domain_disk') } before do allow(root_disk).to receive(:name).and_return('default_domain.img') allow(domain).to receive(:volumes).and_return([nil, root_disk]) allow(libvirt_domain).to receive(:name).and_return('default_domain') allow(subject).to receive(:download_image).and_return(true) end it 'should succeed' do expect(ui).to receive(:info).with('Packaging domain...') expect(ui).to receive(:info).with(/Downloading default_domain.img to .*\/box.img/) expect(ui).to receive(:info).with('Image has backing image, copying image and rebasing ...') expect(subject).to receive(:`).with(/qemu-img info .*\/box.img | grep 'backing file:' | cut -d ':' -f2/).and_return("some image") expect(subject).to receive(:`).with(/qemu-img rebase -p -b "" .*\/box.img/) expect(subject).to receive(:`).with(/virt-sysprep --no-logfile --operations .* -a .*\/box.img .*/) 
expect(subject).to receive(:`).with(/virt-sparsify --in-place .*\/box.img/) expect(subject).to receive(:`).with(/qemu-img info --output=json .*\/box.img/).and_return( { 'virtual-size': 5*1024*1024*1024 }.to_json ) expect(subject.call(env)).to be_nil end end context 'when detecting the format' do let(:root_disk) { double('libvirt_domain_disk') } let(:disk2) { double('libvirt_additional_disk') } let(:fake_env) { Hash.new } before do allow(root_disk).to receive(:name).and_return('default_domain.img') allow(disk2).to receive(:name).and_return('disk2.img') allow(libvirt_domain).to receive(:name).and_return('default_domain') end context 'with two disks' do before do allow(domain).to receive(:volumes).and_return([root_disk, disk2]) end it 'should emit a warning' do expect(ui).to receive(:info).with('Packaging domain...') expect(ui).to receive(:warn).with(/Detected more than one volume for machine.*\n.*/) expect(subject).to receive(:package_v1) expect(subject.call(env)).to be_nil end end context 'with format set to v1' do before do allow(domain).to receive(:volumes).and_return([root_disk]) stub_const("ENV", fake_env) fake_env['VAGRANT_LIBVIRT_BOX_FORMAT_VERSION'] = "v1" end it 'should call v1 packaging' do expect(ui).to receive(:info).with('Packaging domain...') expect(subject).to receive(:package_v1) expect(subject.call(env)).to be_nil end end context 'with format set to v2' do before do allow(domain).to receive(:volumes).and_return([root_disk]) stub_const("ENV", fake_env) fake_env['VAGRANT_LIBVIRT_BOX_FORMAT_VERSION'] = "v2" end it 'should call v1 packaging' do expect(ui).to receive(:info).with('Packaging domain...') expect(subject).to receive(:package_v2) expect(subject.call(env)).to be_nil end end context 'with invalid format' do before do allow(domain).to receive(:volumes).and_return([root_disk]) stub_const("ENV", fake_env) fake_env['VAGRANT_LIBVIRT_BOX_FORMAT_VERSION'] = "bad format" end it 'should emit a warning and default to v1' do expect(ui).to receive(:info).with('Packaging domain...') expect(ui).to receive(:warn).with(/Unrecognized value for.*defaulting to v1/) expect(subject).to receive(:package_v1) expect(subject.call(env)).to be_nil end end end context 'with v2 format' do let(:disk1) { double('libvirt_domain_disk') } let(:disk2) { double('libvirt_additional_disk') } let(:fake_env) { Hash.new } before do allow(disk1).to receive(:name).and_return('default_domain.img') allow(disk2).to receive(:name).and_return('disk2.img') allow(libvirt_domain).to receive(:name).and_return('default_domain') allow(subject).to receive(:download_image).and_return(true).twice() stub_const("ENV", fake_env) fake_env['VAGRANT_LIBVIRT_BOX_FORMAT_VERSION'] = "v2" end context 'with 2 disks' do before do allow(domain).to receive(:volumes).and_return([disk1, disk2]) end it 'should succeed' do expect(ui).to receive(:info).with('Packaging domain...') expect(ui).to receive(:info).with(/Downloading default_domain.img to .*\/box_1.img/) expect(ui).to receive(:info).with('Image has backing image, copying image and rebasing ...') expect(subject).to receive(:`).with(/qemu-img info .*\/box_1.img | grep 'backing file:' | cut -d ':' -f2/).and_return("some image") expect(subject).to receive(:`).with(/qemu-img rebase -p -b "" .*\/box_1.img/) expect(subject).to receive(:`).with(/virt-sysprep --no-logfile --operations .* -a .*\/box_1.img .*/) expect(subject).to receive(:`).with(/virt-sparsify --in-place .*\/box_1.img/) expect(ui).to receive(:info).with(/Downloading disk2.img to .*\/box_2.img/) expect(ui).to 
receive(:info).with('Image has backing image, copying image and rebasing ...') expect(subject).to receive(:`).with(/qemu-img info .*\/box_2.img | grep 'backing file:' | cut -d ':' -f2/).and_return("some image") expect(subject).to receive(:`).with(/qemu-img rebase -p -b "" .*\/box_2.img/) expect(subject).to receive(:`).with(/virt-sparsify --in-place .*\/box_2.img/) expect(File).to receive(:write).with( /.*\/metadata.json/, <<-EOF.unindent.rstrip() { "provider": "libvirt", "format": "qcow2", "disks": [ { "path": "box_1.img" }, { "path": "box_2.img" } ] } EOF ) expect(File).to receive(:write).with(/.*\/Vagrantfile/, /.*/) expect(subject.call(env)).to be_nil end end context 'with 1 disk' do before do allow(domain).to receive(:volumes).and_return([disk1]) end it 'should succeed' do expect(ui).to receive(:info).with('Packaging domain...') expect(ui).to receive(:info).with(/Downloading default_domain.img to .*\/box_1.img/) expect(ui).to receive(:info).with('Image has backing image, copying image and rebasing ...') expect(subject).to receive(:`).with(/qemu-img info .*\/box_1.img | grep 'backing file:' | cut -d ':' -f2/).and_return("some image") expect(subject).to receive(:`).with(/qemu-img rebase -p -b "" .*\/box_1.img/) expect(subject).to receive(:`).with(/virt-sysprep --no-logfile --operations .* -a .*\/box_1.img .*/) expect(subject).to receive(:`).with(/virt-sparsify --in-place .*\/box_1.img/) expect(File).to receive(:write).with( /.*\/metadata.json/, <<-EOF.unindent.rstrip() { "provider": "libvirt", "format": "qcow2", "disks": [ { "path": "box_1.img" } ] } EOF ) expect(File).to receive(:write).with(/.*\/Vagrantfile/, /.*/) expect(subject.call(env)).to be_nil end end end end describe '#vagrantfile_content' do context 'with defaults' do it 'should output expected content' do expect(subject.vagrantfile_content(env)).to eq( <<-EOF.unindent Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.driver = "kvm" end end EOF ) end end context 'with custom user vagrantfile' do before do env["package.vagrantfile"] = "_Vagrantfile" end it 'should output Vagrantfile containing reference' do expect(subject.vagrantfile_content(env)).to eq( <<-EOF.unindent Vagrant.configure("2") do |config| config.vm.provider :libvirt do |libvirt| libvirt.driver = "kvm" end # Load include vagrant file if it exists after the auto-generated # so it can override any of the settings include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__) load include_vagrantfile if File.exist?(include_vagrantfile) end EOF ) end end end end vagrant-libvirt-0.7.0/spec/unit/action/prepare_nfs_settings_spec.rb000066400000000000000000000031701414232526500256070ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'vagrant-libvirt/action/prepare_nfs_settings' describe VagrantPlugins::ProviderLibvirt::Action::PrepareNFSSettings do subject { described_class.new(app, env) } include_context 'unit' describe '#call' do before do # avoid requiring nfsd installed to run tests allow(machine.env.host).to receive(:capability?).with(:nfs_installed).and_return(true) allow(machine.env.host).to receive(:capability).with(:nfs_installed).and_return(true) end context 'when enabled' do let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.box = "vagrant-libvirt/test" config.vm.define :test config.vm.synced_folder ".", "/vagrant", type: "nfs" config.vm.provider :libvirt do |libvirt| #{vagrantfile_providerconfig} end end EOF end 
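      # The socket double below stands in for the TCP connection attempt made while
      # resolving which guest address to use; the example's first TCPSocket.new call
      # intentionally raises so the action falls through to the next candidate IP.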
let(:socket) { double('socket') } before do allow(::TCPSocket).to receive(:new).and_return(socket) allow(socket).to receive(:close) end it 'should retrieve the guest IP address' do times_called = 0 expect(::TCPSocket).to receive(:new) do # force reaching later code times_called += 1 times_called < 2 ? raise("StandardError") : socket end expect(machine).to receive(:ssh_info).and_return({:host => '192.168.1.2'}) expect(communicator).to receive(:execute).and_yield(:stdout, "192.168.1.2\n192.168.2.2") expect(subject.call(env)).to be_nil end end end end vagrant-libvirt-0.7.0/spec/unit/action/set_name_of_domain_spec.rb000066400000000000000000000012551414232526500251730ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' describe VagrantPlugins::ProviderLibvirt::Action::SetNameOfDomain do before :each do @env = EnvironmentHelper.new end it 'builds unique domain name' do @env.random_hostname = true dmn = VagrantPlugins::ProviderLibvirt::Action::SetNameOfDomain.new(Object.new, @env) first = dmn.build_domain_name(@env) second = dmn.build_domain_name(@env) expect(first).to_not eq(second) end it 'builds simple domain name' do @env.default_prefix = 'pre_' dmn = VagrantPlugins::ProviderLibvirt::Action::SetNameOfDomain.new(Object.new, @env) expect(dmn.build_domain_name(@env)).to eq('pre_') end end vagrant-libvirt-0.7.0/spec/unit/action/shutdown_domain_spec.rb000066400000000000000000000120301414232526500245600ustar00rootroot00000000000000require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/action/shutdown_domain' describe VagrantPlugins::ProviderLibvirt::Action::StartShutdownTimer do subject { described_class.new(app, env) } include_context 'unit' describe '#call' do it 'should set shutdown_start_time' do expect(env[:shutdown_start_time]).to eq(nil) expect(subject.call(env)).to eq(nil) expect(env[:shutdown_start_time]).to_not eq(nil) end end end describe VagrantPlugins::ProviderLibvirt::Action::ShutdownDomain do subject { described_class.new(app, env, target_state, current_state) } include_context 'unit' include_context 'libvirt' let(:driver) { double('driver') } let(:libvirt_domain) { double('libvirt_domain') } let(:servers) { double('servers') } let(:current_state) { :running } let(:target_state) { :shutoff } before do allow(machine.provider).to receive('driver').and_return(driver) allow(driver).to receive(:created?).and_return(true) allow(driver).to receive(:connection).and_return(connection) end describe '#call' do before do allow(connection).to receive(:servers).and_return(servers) allow(servers).to receive(:get).and_return(domain) allow(ui).to receive(:info).with('Attempting direct shutdown of domain...') allow(env).to receive(:[]).and_call_original allow(env).to receive(:[]).with(:shutdown_start_time).and_return(Time.now) end context "when state is shutoff" do before do allow(driver).to receive(:state).and_return(:shutoff) end it "should not shutdown" do expect(domain).not_to receive(:poweroff) subject.call(env) end it "should not print shutdown message" do expect(ui).not_to receive(:info) subject.call(env) end it "should provide a true result" do subject.call(env) expect(env[:result]).to be_truthy end end context "when state is running" do before do allow(driver).to receive(:state).and_return(:running) end it "should shutdown" do expect(domain).to receive(:wait_for) expect(domain).to receive(:shutdown) subject.call(env) end it "should print shutdown message" do expect(domain).to receive(:wait_for) 
expect(domain).to receive(:shutdown) expect(ui).to receive(:info).with('Attempting direct shutdown of domain...') subject.call(env) end context "when final state is not shutoff" do before do expect(driver).to receive(:state).and_return(:running).exactly(3).times expect(domain).to receive(:wait_for) expect(domain).to receive(:shutdown) end it "should provide a false result" do subject.call(env) expect(env[:result]).to be_falsey end end context "when final state is shutoff" do before do expect(driver).to receive(:state).and_return(:running).exactly(2).times expect(driver).to receive(:state).and_return(:shutoff).exactly(1).times expect(domain).to receive(:wait_for) expect(domain).to receive(:shutdown) end it "should provide a true result" do subject.call(env) expect(env[:result]).to be_truthy end end context "when timeout exceeded" do before do expect(machine).to receive_message_chain('config.vm.graceful_halt_timeout').and_return(1) expect(Time).to receive(:now).and_return(env[:shutdown_start_time] + 2) expect(driver).to receive(:state).and_return(:running).exactly(1).times expect(domain).to_not receive(:wait_for) expect(domain).to_not receive(:shutdown) end it "should provide a false result" do subject.call(env) expect(env[:result]).to be_falsey end end context "when timeout not exceeded" do before do expect(machine).to receive_message_chain('config.vm.graceful_halt_timeout').and_return(2) expect(Time).to receive(:now).and_return(env[:shutdown_start_time] + 1.5) expect(driver).to receive(:state).and_return(:running).exactly(3).times expect(domain).to receive(:wait_for) do |time| expect(time).to be < 1 expect(time).to be > 0 end expect(domain).to receive(:shutdown) end it "should wait for the reduced time" do subject.call(env) expect(env[:result]).to be_falsey end end end context "when required action not run" do before do expect(env).to receive(:[]).with(:shutdown_start_time).and_call_original end it "should raise an exception" do expect { subject.call(env) }.to raise_error( VagrantPlugins::ProviderLibvirt::Errors::CallChainError, /Invalid action chain, must ensure that '.*ShutdownTimer' is called prior to calling '.*ShutdownDomain'/ ) end end end end vagrant-libvirt-0.7.0/spec/unit/action/start_domain_spec.rb000066400000000000000000000164551414232526500240610ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' require 'vagrant-libvirt/errors' require 'vagrant-libvirt/action/start_domain' describe VagrantPlugins::ProviderLibvirt::Action::StartDomain do subject { described_class.new(app, env) } include_context 'unit' include_context 'libvirt' let(:libvirt_domain) { double('libvirt_domain') } let(:libvirt_client) { double('libvirt_client') } let(:servers) { double('servers') } let(:domain_xml) { File.read(File.join(File.dirname(__FILE__), File.basename(__FILE__, '.rb'), test_file)) } let(:updated_domain_xml) { File.read(File.join(File.dirname(__FILE__), File.basename(__FILE__, '.rb'), updated_test_file)) } describe '#call' do let(:test_file) { 'default.xml' } before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:connection).and_return(connection) allow(connection).to receive(:client).and_return(libvirt_client) allow(libvirt_client).to receive(:lookup_domain_by_uuid).and_return(libvirt_domain) allow(connection).to receive(:servers).and_return(servers) allow(servers).to receive(:get).and_return(domain) allow(logger).to receive(:debug) expect(logger).to receive(:info) 
expect(ui).to_not receive(:error) allow(libvirt_domain).to receive(:xml_desc).and_return(domain_xml) allow(libvirt_domain).to receive(:max_memory).and_return(512*1024) allow(libvirt_domain).to receive(:num_vcpus).and_return(1) end it 'should execute without changing' do allow(libvirt_domain).to receive(:undefine) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end context 'when previously running default config' do let(:test_file) { 'existing.xml' } it 'should execute without changing' do allow(libvirt_domain).to receive(:undefine) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end context 'tpm' do context 'passthrough tpm added' do let(:updated_test_file) { 'default_added_tpm_path.xml' } let(:vagrantfile_providerconfig) do <<-EOF libvirt.tpm_path = '/dev/tpm0' libvirt.tpm_type = 'passthrough' libvirt.tpm_model = 'tpm-tis' EOF end it 'should modify the domain tpm_path' do expect(libvirt_domain).to receive(:undefine) expect(logger).to receive(:debug).with('tpm config changed') expect(servers).to receive(:create).with(xml: updated_domain_xml) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end context 'emulated tpm added' do let(:updated_test_file) { 'default_added_tpm_version.xml' } let(:vagrantfile_providerconfig) do <<-EOF libvirt.tpm_type = 'emulator' libvirt.tpm_model = 'tpm-crb' libvirt.tpm_version = '2.0' EOF end it 'should modify the domain tpm_path' do expect(libvirt_domain).to receive(:undefine) expect(logger).to receive(:debug).with('tpm config changed') expect(servers).to receive(:create).with(xml: updated_domain_xml) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end context 'same passthrough tpm config' do let(:test_file) { 'default_added_tpm_path.xml' } let(:updated_test_file) { 'default_added_tpm_path.xml' } let(:vagrantfile_providerconfig) do <<-EOF libvirt.tpm_path = '/dev/tpm0' libvirt.tpm_type = 'passthrough' libvirt.tpm_model = 'tpm-tis' EOF end it 'should execute without changing' do expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end context 'same emulated tpm config' do let(:test_file) { 'default_added_tpm_version.xml' } let(:updated_test_file) { 'default_added_tpm_version.xml' } let(:vagrantfile_providerconfig) do <<-EOF libvirt.tpm_type = 'emulator' libvirt.tpm_model = 'tpm-crb' libvirt.tpm_version = '2.0' EOF end it 'should execute without changing' do expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end context 'change from passthrough to emulated' do let(:test_file) { 'default_added_tpm_path.xml' } let(:updated_test_file) { 'default_added_tpm_version.xml' } let(:vagrantfile_providerconfig) do <<-EOF libvirt.tpm_type = 'emulator' libvirt.tpm_model = 'tpm-crb' libvirt.tpm_version = '2.0' EOF end it 'should modify the domain' do expect(libvirt_domain).to receive(:undefine) expect(logger).to receive(:debug).with('tpm config changed') expect(servers).to receive(:create).with(xml: updated_domain_xml) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end end context 'clock_timers' do let(:test_file) { 'clock_timer_rtc.xml' } context 'timers unchanged' do 
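        # The rtc timer is already present in the loaded domain XML (clock_timer_rtc.xml),
        # so an identical clock_timer config should not trigger a domain redefine.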
let(:vagrantfile_providerconfig) do <<-EOF libvirt.clock_timer(:name => "rtc") EOF end it 'should not modify the domain' do expect(logger).to_not receive(:debug).with('clock timers config changed') expect(servers).to_not receive(:create) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end context 'timers added' do let(:vagrantfile_providerconfig) do <<-EOF libvirt.clock_timer(:name => "rtc") libvirt.clock_timer(:name => "tsc") EOF end it 'should modify the domain' do expect(libvirt_domain).to receive(:undefine) expect(logger).to receive(:debug).with('clock timers config changed') expect(servers).to receive(:create).with(xml: match(/\s*\s*\s*<\/clock>/)) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end context 'timers removed' do it 'should modify the domain' do expect(libvirt_domain).to receive(:undefine) expect(logger).to receive(:debug).with('clock timers config changed') expect(servers).to receive(:create).with(xml: match(/\s*<\/clock>/)) expect(libvirt_domain).to receive(:autostart=) expect(domain).to receive(:start) expect(subject.call(env)).to be_nil end end end end end vagrant-libvirt-0.7.0/spec/unit/action/start_domain_spec/000077500000000000000000000000001414232526500235215ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/unit/action/start_domain_spec/clock_timer_rtc.xml000066400000000000000000000014411414232526500274060ustar00rootroot00000000000000 <description/> <uuid/> <memory/> <vcpu>1</vcpu> <cpu mode='host-model'> <model fallback='allow'/> </cpu> <os> <type>hvm</type> <kernel/> <initrd/> <cmdline/> </os> <features> <acpi/> <apic/> <pae/> </features> <clock offset='utc'> <timer name='rtc'/> </clock> <devices> <serial type='pty'> <target port='0'/> </serial> <console type='pty'> <target port='0'/> </console> <input bus='ps2' type='mouse'/> <graphics autoport='yes' keymap='en-us' listen='127.0.0.1' port='-1' type='vnc'/> <video> <model heads='1' type='cirrus' vram='9216'/> </video> </devices> </domain> �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/action/start_domain_spec/default.xml��������������������������������0000664�0000000�0000000�00000001377�14142325265�0025677�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<domain xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0' type=''> <name/> <title/> <description/> <uuid/> <memory/> <vcpu>1</vcpu> <cpu mode='host-model'> <model fallback='allow'/> </cpu> <os> <type>hvm</type> <kernel/> <initrd/> <cmdline/> </os> <features> <acpi/> <apic/> <pae/> </features> <clock offset='utc'/> <devices> <serial type='pty'> <target port='0'/> </serial> <console type='pty'> <target port='0'/> </console> <input bus='ps2' type='mouse'/> <graphics autoport='yes' keymap='en-us' listen='127.0.0.1' port='-1' type='vnc'/> <video> <model heads='1' type='cirrus' vram='9216'/> </video> </devices> </domain> 
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/action/start_domain_spec/default_added_tpm_path.xml�����������������0000664�0000000�0000000�00000001532�14142325265�0030705�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<domain xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0' type=''> <name/> <title/> <description/> <uuid/> <memory/> <vcpu>1</vcpu> <cpu mode='host-model'> <model fallback='allow'/> </cpu> <os> <type>hvm</type> <kernel/> <initrd/> <cmdline/> </os> <features> <acpi/> <apic/> <pae/> </features> <clock offset='utc'/> <devices> <serial type='pty'> <target port='0'/> </serial> <console type='pty'> <target port='0'/> </console> <input bus='ps2' type='mouse'/> <graphics autoport='yes' keymap='en-us' listen='127.0.0.1' port='-1' type='vnc'/> <video> <model heads='1' type='cirrus' vram='9216'/> </video> <tpm model='tpm-tis'><backend type='passthrough'><device path='/dev/tpm0'/></backend></tpm></devices> </domain> ����������������������������������������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/action/start_domain_spec/default_added_tpm_version.xml��������������0000664�0000000�0000000�00000001502�14142325265�0031433�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<domain xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0' type=''> <name/> <title/> <description/> <uuid/> <memory/> <vcpu>1</vcpu> <cpu mode='host-model'> <model fallback='allow'/> </cpu> <os> <type>hvm</type> <kernel/> <initrd/> <cmdline/> </os> <features> <acpi/> <apic/> <pae/> </features> <clock offset='utc'/> <devices> <serial type='pty'> <target port='0'/> </serial> <console type='pty'> <target port='0'/> </console> <input bus='ps2' type='mouse'/> <graphics autoport='yes' keymap='en-us' listen='127.0.0.1' port='-1' type='vnc'/> <video> <model heads='1' type='cirrus' vram='9216'/> </video> <tpm model='tpm-crb'><backend type='emulator' version='2.0'/></tpm></devices> </domain> 
����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/action/start_domain_spec/existing.xml�������������������������������0000664�0000000�0000000�00000004301�14142325265�0026073�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<domain type='qemu'> <name>vagrant-libvirt_default</name> <uuid>881a931b-0110-4d10-81aa-47a1a19f5726</uuid> <description>Source: /home/test/vagrant-libvirt/Vagrantfile</description> <memory unit='KiB'>2097152</memory> <currentMemory unit='KiB'>2097152</currentMemory> <vcpu placement='static'>2</vcpu> <os> <type arch='x86_64' machine='pc-i440fx-6.0'>hvm</type> <boot dev='hd'/> </os> <features> <acpi/> <apic/> <pae/> </features> <cpu mode='host-model' check='partial'/> <clock offset='utc'/> <on_poweroff>destroy</on_poweroff> <on_reboot>restart</on_reboot> <on_crash>destroy</on_crash> <devices> <emulator>/usr/bin/qemu-system-x86_64</emulator> <disk type='file' device='disk'> <driver name='qemu' type='qcow2'/> <source file='/var/lib/libvirt/images/vagrant-libvirt_default.img'/> <target dev='vda' bus='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> </disk> <controller type='usb' index='0' model='piix3-uhci'> <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x2'/> </controller> <controller type='pci' index='0' model='pci-root'/> <interface type='network'> <mac address='52:54:00:7d:14:0e'/> <source network='vagrant-libvirt'/> <model type='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x05' function='0x0'/> </interface> <serial type='pty'> <target type='isa-serial' port='0'> <model name='isa-serial'/> </target> </serial> <console type='pty'> <target type='serial' port='0'/> </console> <input type='mouse' bus='ps2'/> <input type='keyboard' bus='ps2'/> <graphics type='vnc' port='-1' autoport='yes' listen='127.0.0.1' keymap='en-us'> <listen type='address' address='127.0.0.1'/> </graphics> <audio id='1' type='none'/> <video> <model type='cirrus' vram='9216' heads='1' primary='yes'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/> </video> <memballoon model='virtio'> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </memballoon> </devices> </domain> �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/action/wait_till_up_spec.rb�����������������������������������������0000664�0000000�0000000�00000011307�14142325265�0024060�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# 
frozen_string_literal: true require 'vagrant-libvirt/action/wait_till_up' require 'vagrant-libvirt/errors' require 'spec_helper' require 'support/sharedcontext' require 'support/libvirt_context' describe VagrantPlugins::ProviderLibvirt::Action::WaitTillUp do subject { described_class.new(app, env) } include_context 'vagrant-unit' include_context 'libvirt' include_context 'unit' let (:driver) { VagrantPlugins::ProviderLibvirt::Driver.new env[:machine] } describe '#call' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Provider).to receive(:driver) .and_return(driver) allow(driver).to receive(:get_domain).and_return(domain) allow(driver).to receive(:state).and_return(:running) # return some information for domain when needed allow(domain).to receive(:mac).and_return('9C:D5:53:F1:5A:E7') end context 'when machine does not exist' do before do allow(driver).to receive(:get_domain).and_return(nil) end it 'raises exception' do expect(app).to_not receive(:call) expect { subject.call(env) }.to raise_error(::VagrantPlugins::ProviderLibvirt::Errors::NoDomainError, /No domain found. Domain dummy-vagrant_dummy not found/) end end context 'when machine is booting' do context 'if interrupted looking for IP' do before do env[:interrupted] = true end it 'should exit' do expect(app).to_not receive(:call) expect(ui).to receive(:info).with('Waiting for domain to get an IP address...') expect(logger).to receive(:debug).with(/Searching for IP for MAC address: .*/) expect(subject.call(env)).to be_nil end end context 'multiple timeouts waiting for IP' do before do allow(env).to receive(:[]).and_call_original allow(env).to receive(:[]).with(:interrupted).and_return(false) allow(logger).to receive(:debug) allow(logger).to receive(:info) end it 'should abort after hitting limit' do expect(domain).to receive(:wait_for).at_least(300).times.and_raise(::Fog::Errors::TimeoutError) expect(app).to_not receive(:call) expect(ui).to receive(:info).with('Waiting for domain to get an IP address...') expect(ui).to_not receive(:info).with('Waiting for SSH to become available...') expect {subject.call(env) }.to raise_error(::Fog::Errors::TimeoutError) end end end context 'when machine boots and ip available' do before do allow(domain).to receive(:wait_for).and_return(true) allow(env).to receive(:[]).and_call_original allow(env).to receive(:[]).with(:interrupted).and_return(false) allow(driver).to receive(:get_domain_ipaddress).and_return('192.168.121.2') end it 'should call the next hook' do expect(app).to receive(:call) expect(ui).to receive(:info).with('Waiting for domain to get an IP address...') expect(logger).to receive(:debug).with(/Searching for IP for MAC address: .*/) expect(logger).to receive(:info).with('Got IP address 192.168.121.2') expect(logger).to receive(:info).with(/Time for getting IP: .*/) expect(subject.call(env)).to be_nil end end end describe '#recover' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver).to receive(:get_domain).and_return(machine) allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver).to receive(:state) .and_return(:not_created) allow(env).to receive(:[]).and_call_original end it 'should do nothing by default' do expect(env).to_not receive(:[]).with(:action_runner) # cleanup expect(subject.recover(env)).to be_nil end context 'with machine coming up' do before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver).to receive(:state) .and_return(:running) env[:destroy_on_error] = true end context 'and user has disabled destroy on 
failure' do before do env[:destroy_on_error] = false end it 'skips terminate on failure' do expect(env).to_not receive(:[]).with(:action_runner) # cleanup expect(subject.recover(env)).to be_nil end end context 'and using default settings' do let(:runner) { double('runner') } it 'deletes VM on failure' do expect(env).to receive(:[]).with(:action_runner).and_return(runner) # cleanup expect(runner).to receive(:run) expect(subject.recover(env)).to be_nil end end end end end �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/action_spec.rb������������������������������������������������������0000664�0000000�0000000�00000012703�14142325265�0021365�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true require 'spec_helper' require 'support/sharedcontext' require 'vagrant/action/runner' require 'vagrant-libvirt/action' describe VagrantPlugins::ProviderLibvirt::Action do subject { described_class } include_context 'libvirt' include_context 'unit' let(:libvirt_domain) { double('libvirt_domain') } let(:runner) { Vagrant::Action::Runner.new(env) } let(:state) { double('state') } before do allow_any_instance_of(VagrantPlugins::ProviderLibvirt::Driver) .to receive(:connection).and_return(connection) allow(machine).to receive(:id).and_return('test-machine-id') allow(machine).to receive(:state).and_return(state) allow(logger).to receive(:info) allow(logger).to receive(:debug) allow(logger).to receive(:error) end def allow_action_env_result(action, *responses) results = responses.dup allow_any_instance_of(action).to receive(:call) do |cls, env| app = cls.instance_variable_get(:@app) env[:result] = results[0] if results.length > 1 results.shift end app.call(env) end end describe '#action_halt' do context 'not created' do before do expect(state).to receive(:id).and_return(:not_created) end it 'should execute without error' do expect(ui).to receive(:info).with('Domain is not created. Please run `vagrant up` first.') expect { runner.run(subject.action_halt) }.not_to raise_error end end context 'running' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsCreated, true) allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsSuspended, false) allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsRunning, true) end context 'when shutdown domain works' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::ShutdownDomain, true) allow_action_env_result(Vagrant::Action::Builtin::GracefulHalt, true) allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsRunning, true, false) end it 'should skip calling HaltDomain' do expect(ui).to_not receive(:info).with('Domain is not created. 
Please run `vagrant up` first.') expect_any_instance_of(VagrantPlugins::ProviderLibvirt::Action::HaltDomain).to_not receive(:call) expect { runner.run(subject.action_halt) }.not_to raise_error end end context 'when shutdown domain fails' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::ShutdownDomain, false) allow_action_env_result(Vagrant::Action::Builtin::GracefulHalt, false) end it 'should call halt' do expect_any_instance_of(VagrantPlugins::ProviderLibvirt::Action::HaltDomain).to receive(:call) expect { runner.run(subject.action_halt) }.not_to raise_error end end end end describe '#action_ssh' do context 'when not created' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsCreated, false) end it 'should cause an error' do expect{ machine.action(:ssh, ssh_opts: {})}.to raise_error(Vagrant::Errors::VMNotCreatedError) end end context 'when created' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsCreated, true) end context 'when not running' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsRunning, false) end it 'should cause an error' do expect{ machine.action(:ssh, ssh_opts: {})}.to raise_error(Vagrant::Errors::VMNotRunningError) end end context 'when running' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsRunning, true) end it 'should call SSHExec' do expect_any_instance_of(Vagrant::Action::Builtin::SSHExec).to receive(:call).and_return(0) expect(machine.action(:ssh, ssh_opts: {})).to match(hash_including({:action_name => :machine_action_ssh})) end end end end describe '#action_ssh_run' do context 'when not created' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsCreated, false) end it 'should cause an error' do expect{ machine.action(:ssh_run, ssh_opts: {})}.to raise_error(Vagrant::Errors::VMNotCreatedError) end end context 'when created' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsCreated, true) end context 'when not running' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsRunning, false) end it 'should cause an error' do expect{ machine.action(:ssh_run, ssh_opts: {})}.to raise_error(Vagrant::Errors::VMNotRunningError) end end context 'when running' do before do allow_action_env_result(VagrantPlugins::ProviderLibvirt::Action::IsRunning, true) end it 'should call SSHRun' do expect_any_instance_of(Vagrant::Action::Builtin::SSHRun).to receive(:call).and_return(0) expect(machine.action(:ssh_run, ssh_opts: {})).to match(hash_including({:action_name => :machine_action_ssh_run})) end end end end end �������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/config_spec.rb������������������������������������������������������0000664�0000000�0000000�00000051752�14142325265�0021364�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true require 'support/binding_proc' require 'spec_helper' require 'support/sharedcontext' require 'vagrant-libvirt/config' describe VagrantPlugins::ProviderLibvirt::Config do include_context 'unit' let(:fake_env) { Hash.new } describe '#clock_timer' do it 
'should handle all options' do expect( subject.clock_timer( :name => 'rtc', :track => 'wall', :tickpolicy => 'delay', :present => 'yes', ).length ).to be(1) expect( subject.clock_timer( :name => 'tsc', :tickpolicy => 'delay', :frequency => '100', :mode => 'auto', :present => 'yes', ).length ).to be(2) end it 'should correctly save the options' do opts = {:name => 'rtc', :track => 'wall'} expect(subject.clock_timer(opts).length).to be(1) expect(subject.clock_timers[0]).to eq(opts) opts[:name] = 'tsc' expect(subject.clock_timers[0]).to_not eq(opts) end it 'should error name option is missing' do expect{ subject.clock_timer(:track => "wall") }.to raise_error("Clock timer name must be specified") end it 'should error if nil value for option supplied' do expect{ subject.clock_timer(:name => "rtc", :track => nil) }.to raise_error("Value of timer option track is nil") end it 'should error if unrecognized option specified' do expect{ subject.clock_timer(:name => "tsc", :badopt => "value") }.to raise_error("Unknown clock timer option: badopt") end end describe '#finalize!' do it 'is valid with defaults' do subject.finalize! end context '@uri' do before(:example) do stub_const("ENV", fake_env) fake_env['HOME'] = "/home/tests" end # table describing expected behaviour of inputs that affect the resulting uri as # well as any subsequent settings that might be inferred if the uri was # explicitly set. [ # settings [ # all default {}, {:uri => "qemu:///system"}, ], # explicit uri settings [ # transport and hostname {:uri => "qemu+ssh://localhost/system"}, {:uri => "qemu+ssh://localhost/system", :connect_via_ssh => true, :host => "localhost", :username => nil}, ], [ # tcp transport with port {:uri => "qemu+tcp://localhost:5000/system"}, {:uri => "qemu+tcp://localhost:5000/system", :connect_via_ssh => false, :host => "localhost", :username => nil}, ], [ # connect explicit to unix socket {:uri => "qemu+unix:///system"}, {:uri => "qemu+unix:///system", :connect_via_ssh => false, :host => nil, :username => nil}, ], [ # via libssh2 should enable ssh as well {:uri => "qemu+libssh2://user@remote/system?known_hosts=/home/user/.ssh/known_hosts"}, { :uri => "qemu+libssh2://user@remote/system?known_hosts=/home/user/.ssh/known_hosts", :connect_via_ssh => true, :host => "remote", :username => "user", }, ], [ # xen {:uri => "xen://remote/system?no_verify=1"}, { :uri => "xen://remote/system?no_verify=1", :connect_via_ssh => false, :host => "remote", :username => nil, :id_ssh_key_file => nil, }, { :setup => ProcWithBinding.new { expect(File).to_not receive(:file?) 
} } ], [ # xen {:uri => "xen+ssh://remote/system?no_verify=1"}, { :uri => "xen+ssh://remote/system?no_verify=1", :connect_via_ssh => true, :host => "remote", :username => nil, :id_ssh_key_file => "/home/tests/.ssh/id_rsa", }, { :setup => ProcWithBinding.new { expect(File).to receive(:file?).with("/home/tests/.ssh/id_rsa").and_return(true) } } ], # with LIBVIRT_DEFAULT_URI [ # all other set to default {}, {:uri => "custom:///custom_path", :qemu_use_session => false}, { :env => {'LIBVIRT_DEFAULT_URI' => "custom:///custom_path"}, } ], [ # with session {}, {:uri => "qemu:///session", :qemu_use_session => true}, { :env => {'LIBVIRT_DEFAULT_URI' => "qemu:///session"}, } ], [ # with session and using ssh infer connect by ssh and ignore host as not provided {}, {:uri => "qemu+ssh:///session", :qemu_use_session => true, :connect_via_ssh => true, :host => nil}, { :env => {'LIBVIRT_DEFAULT_URI' => "qemu+ssh:///session"}, } ], [ # with session and using ssh to specific host with additional query options provided, infer host and ssh {}, {:uri => "qemu+ssh://remote/session?keyfile=my_id_rsa", :qemu_use_session => true, :connect_via_ssh => true, :host => 'remote'}, { :env => {'LIBVIRT_DEFAULT_URI' => "qemu+ssh://remote/session?keyfile=my_id_rsa"}, } ], [ # when session not set {}, {:uri => "qemu:///system", :qemu_use_session => false}, { :env => {'LIBVIRT_DEFAULT_URI' => "qemu:///system"}, } ], [ # when session appearing elsewhere {}, {:uri => "qemu://remote/system?keyfile=my_session_id", :qemu_use_session => false}, { :env => {'LIBVIRT_DEFAULT_URI' => "qemu://remote/system?keyfile=my_session_id"}, } ], # ignore LIBVIRT_DEFAULT_URI due to explicit settings [ # when uri explicitly set {:uri => 'qemu:///system'}, {:uri => 'qemu:///system'}, { :env => {'LIBVIRT_DEFAULT_URI' => 'qemu://session'}, } ], [ # when host explicitly set {:host => 'remote'}, {:uri => 'qemu://remote/system'}, { :env => {'LIBVIRT_DEFAULT_URI' => 'qemu://session'}, } ], [ # when connect_via_ssh explicitly set {:connect_via_ssh => true}, {:uri => 'qemu+ssh://localhost/system?no_verify=1'}, { :env => {'LIBVIRT_DEFAULT_URI' => 'qemu://session'}, } ], [ # when username explicitly set without ssh {:username => 'my_user' }, {:uri => 'qemu:///system', :username => 'my_user'}, { :env => {'LIBVIRT_DEFAULT_URI' => 'qemu://session'}, } ], [ # when username explicitly set with host but without ssh {:username => 'my_user', :host => 'remote'}, {:uri => 'qemu://remote/system', :username => 'my_user'}, { :env => {'LIBVIRT_DEFAULT_URI' => 'qemu://session'}, } ], [ # when password explicitly set {:password => 'some_password'}, {:uri => 'qemu:///system', :password => 'some_password'}, { :env => {'LIBVIRT_DEFAULT_URI' => 'qemu://session'}, } ], # driver settings [ # set to kvm only {:driver => 'kvm'}, {:uri => "qemu:///system"}, ], [ # set to qemu only {:driver => 'qemu'}, {:uri => "qemu:///system"}, ], [ # set to qemu with session enabled {:driver => 'qemu', :qemu_use_session => true}, {:uri => "qemu:///session"}, ], [ # set to openvz only {:driver => 'openvz'}, {:uri => "openvz:///system"}, ], [ # set to esx {:driver => 'esx'}, {:uri => "esx:///"}, ], [ # set to vbox only {:driver => 'vbox'}, {:uri => "vbox:///session"}, ], # connect_via_ssh settings [ # enabled {:connect_via_ssh => true}, {:uri => "qemu+ssh://localhost/system?no_verify=1"}, ], [ # enabled with user {:connect_via_ssh => true, :username => 'my_user'}, {:uri => "qemu+ssh://my_user@localhost/system?no_verify=1"}, ], [ # enabled with host {:connect_via_ssh => true, :host => 
'remote_server'}, {:uri => "qemu+ssh://remote_server/system?no_verify=1"}, ], # id_ssh_key_file behaviour [ # set should take given value {:connect_via_ssh => true, :id_ssh_key_file => '/path/to/keyfile'}, {:uri => 'qemu+ssh://localhost/system?no_verify=1&keyfile=/path/to/keyfile', :connect_via_ssh => true}, ], [ # set should infer use of ssh {:id_ssh_key_file => '/path/to/keyfile'}, {:uri => 'qemu+ssh://localhost/system?no_verify=1&keyfile=/path/to/keyfile', :connect_via_ssh => true}, ], [ # connect_via_ssh should enable default but ignore due to not existing {:connect_via_ssh => true}, {:uri => 'qemu+ssh://localhost/system?no_verify=1', :id_ssh_key_file => nil}, { :setup => ProcWithBinding.new { expect(File).to receive(:file?).with("/home/tests/.ssh/id_rsa").and_return(false) } } ], [ # connect_via_ssh should enable default and include due to existing {:connect_via_ssh => true}, {:uri => 'qemu+ssh://localhost/system?no_verify=1&keyfile=/home/tests/.ssh/id_rsa', :id_ssh_key_file => '/home/tests/.ssh/id_rsa'}, { :setup => ProcWithBinding.new { expect(File).to receive(:file?).with("/home/tests/.ssh/id_rsa").and_return(true) } } ], # socket behaviour [ # set {:socket => '/var/run/libvirt/libvirt-sock'}, {:uri => "qemu:///system?socket=/var/run/libvirt/libvirt-sock"}, ], ].each do |inputs, outputs, options| opts = {} opts.merge!(options) if options it "should handle inputs #{inputs} with env (#{opts[:env]})" do # allow some of these to fail for now if marked as such if !opts[:allow_failure].nil? pending(opts[:allow_failure]) end if !opts[:setup].nil? opts[:setup].apply_binding(binding) end inputs.each do |k, v| subject.instance_variable_set("@#{k}", v) end if !opts[:env].nil? opts[:env].each do |k, v| fake_env[k] = v end end subject.finalize! # ensure failed output indicates which settings are incorrect in the failed test got = subject.instance_variables.each_with_object({}) do |name, hash| if outputs.key?(name.to_s[1..-1].to_sym) hash["#{name.to_s[1..-1]}".to_sym] =subject.instance_variable_get(name) end end expect(got).to eq(outputs) end end context 'when invalid @driver is defined' do it "should raise exception for unrecognized" do subject.driver = "bad-driver" expect { subject.finalize! }.to raise_error("Require specify driver bad-driver") end end context 'when invalid @uri is defined' do it "should raise exception for unrecognized" do subject.uri = "://bad-uri" expect { subject.finalize! }.to raise_error("@uri set to invalid uri '://bad-uri'") end end end context '@system_uri' do [ # system uri [ # transport and hostname {:uri => "qemu+ssh://localhost/session"}, {:uri => "qemu+ssh://localhost/session", :system_uri => "qemu+ssh://localhost/system"}, ], [ # explicitly set {:qemu_use_session => true, :system_uri => "custom://remote/system"}, {:uri => "qemu:///session", :system_uri => "custom://remote/system"}, ], ].each do |inputs, outputs, options| opts = {} opts.merge!(options) if options it "should handle inputs #{inputs} with env (#{opts[:env]})" do # allow some of these to fail for now if marked as such if !opts[:allow_failure].nil? pending(opts[:allow_failure]) end if !opts[:setup].nil? opts[:setup].apply_binding(binding) end inputs.each do |k, v| subject.instance_variable_set("@#{k}", v) end if !opts[:env].nil? opts[:env].each do |k, v| fake_env[k] = v end end subject.finalize! 
# ensure failed output indicates which settings are incorrect in the failed test got = subject.instance_variables.each_with_object({}) do |name, hash| if outputs.key?(name.to_s[1..-1].to_sym) hash["#{name.to_s[1..-1]}".to_sym] =subject.instance_variable_get(name) end end expect(got).to eq(outputs) end end end context '@proxy_command' do before(:example) do stub_const("ENV", fake_env) fake_env['HOME'] = "/home/tests" end [ # no connect_via_ssh [ {:host => "remote"}, nil, ], # connect_via_ssh [ # host {:connect_via_ssh => true, :host => 'remote'}, "ssh 'remote' -W %h:%p", ], [ # include user {:connect_via_ssh => true, :host => 'remote', :username => 'myuser'}, "ssh 'remote' -l 'myuser' -W %h:%p", ], [ # remote contains port {:connect_via_ssh => true, :host => 'remote:2222'}, "ssh 'remote' -p 2222 -W %h:%p", ], [ # include user and default ssh key exists {:connect_via_ssh => true, :host => 'remote', :username => 'myuser'}, "ssh 'remote' -l 'myuser' -i '/home/tests/.ssh/id_rsa' -W %h:%p", { :setup => ProcWithBinding.new { expect(File).to receive(:file?).with("/home/tests/.ssh/id_rsa").and_return(true) } } ], # disable id_ssh_key_file [ {:connect_via_ssh => true, :host => 'remote', :id_ssh_key_file => nil}, "ssh 'remote' -W %h:%p", ], [ # include user {:connect_via_ssh => true, :host => 'remote', :id_ssh_key_file => nil}, "ssh 'remote' -W %h:%p", ], # use @uri [ {:uri => 'qemu+ssh://remote/system'}, "ssh 'remote' -W %h:%p", ], [ {:uri => 'qemu+ssh://myuser@remote/system'}, "ssh 'remote' -l 'myuser' -W %h:%p", ], [ {:uri => 'qemu+ssh://remote/system?keyfile=/some/path/to/keyfile'}, "ssh 'remote' -i '/some/path/to/keyfile' -W %h:%p", ], # provide custom template [ {:connect_via_ssh => true, :host => 'remote', :proxy_command => "ssh {host} nc %h %p" }, "ssh remote nc %h %p", ], [ {:connect_via_ssh => true, :host => 'remote', :username => 'myuser', :proxy_command => "ssh {host} nc %h %p" }, "ssh remote nc %h %p", ], [ {:connect_via_ssh => true, :host => 'remote', :username => 'myuser', :proxy_command => "ssh {host} -l {username} nc %h %p" }, "ssh remote -l myuser nc %h %p", ], ].each do |inputs, proxy_command, options| opts = {} opts.merge!(options) if options it "should handle inputs #{inputs}" do # allow some of these to fail for now if marked as such if !opts[:allow_failure].nil? pending(opts[:allow_failure]) end if !opts[:setup].nil? opts[:setup].apply_binding(binding) end inputs.each do |k, v| subject.instance_variable_set("@#{k}", v) end subject.finalize! expect(subject.proxy_command).to eq(proxy_command) end end end context '@usbctl_dev' do it 'should be empty by default' do subject.finalize! expect(subject.usbctl_dev).to eq({}) end context 'when usb devices added' do it 'should inject a default controller' do subject.usb :vendor => '0x1234', :product => '0xabcd' subject.finalize! expect(subject.usbctl_dev).to eq({:model => 'qemu-xhci'}) end context 'when user specified a controller' do it 'should retain the user setting' do subject.usb :vendor => '0x1234', :product => '0xabcd' subject.usb_controller :model => 'pii3-uchi' subject.finalize! expect(subject.usbctl_dev).to eq({:model => 'pii3-uchi'}) end end end context 'when redirdevs entries added' do it 'should inject a default controller' do subject.redirdev :type => 'spicevmc' subject.finalize! expect(subject.usbctl_dev).to eq({:model => 'qemu-xhci'}) end context 'when user specified a controller' do it 'should retain the user setting' do subject.redirdev :type => 'spicevmc' subject.usb_controller :model => 'pii3-uchi' subject.finalize! 
expect(subject.usbctl_dev).to eq({:model => 'pii3-uchi'}) end end end end end def assert_invalid subject.finalize! errors = subject.validate(machine) raise "No errors: #{errors.inspect}" if errors.values.all?(&:empty?) end def assert_valid subject.finalize! errors = subject.validate(machine) raise "Errors: #{errors.inspect}" unless errors.values.all?(&:empty?) end describe '#validate' do it 'is valid with defaults' do assert_valid end context 'with disks defined' do before { expect(machine).to receive(:provider_config).and_return(subject).at_least(:once) } it 'is valid if relative path used for disk' do subject.storage :file, path: '../path/to/file.qcow2' assert_valid end it 'should be invalid if absolute path used for disk' do subject.storage :file, path: '/absolute/path/to/file.qcow2' assert_invalid end end context 'with mac defined' do let (:vm) { double('vm') } before { expect(machine.config).to receive(:vm).and_return(vm) } it 'is valid with valid mac' do expect(vm).to receive(:networks).and_return([[:public, { mac: 'aa:bb:cc:dd:ee:ff' }]]) assert_valid end it 'is valid with MAC containing no delimiters' do network = [:public, { mac: 'aabbccddeeff' }] expect(vm).to receive(:networks).and_return([network]) assert_valid expect(network[1][:mac]).to eql('aa:bb:cc:dd:ee:ff') end it 'should be invalid if MAC not formatted correctly' do expect(vm).to receive(:networks).and_return([[:public, { mac: 'aa/bb/cc/dd/ee/ff' }]]) assert_invalid end end end describe '#merge' do let(:one) { described_class.new } let(:two) { described_class.new } subject { one.merge(two) } context 'storage' do context 'with disks' do context 'assigned specific devices' do it 'should merge disks with specific devices' do one.storage(:file, device: 'vdb') two.storage(:file, device: 'vdc') subject.finalize! expect(subject.disks).to include(include(device: 'vdb'), include(device: 'vdc')) end end context 'without devices given' do it 'should merge disks with different devices assigned automatically' do one.storage(:file) two.storage(:file) subject.finalize! expect(subject.disks).to include(include(device: 'vdb'), include(device: 'vdc')) end end end context 'with cdroms only' do context 'assigned specific devs' do it 'should merge disks with specific devices' do one.storage(:file, device: :cdrom, dev: 'hda') two.storage(:file, device: :cdrom, dev: 'hdb') subject.finalize! expect(subject.cdroms).to include(include(dev: 'hda'), include(dev: 'hdb')) end end context 'without devs given' do it 'should merge cdroms with different devs assigned automatically' do one.storage(:file, device: :cdrom) two.storage(:file, device: :cdrom) subject.finalize! 
expect(subject.cdroms).to include(include(dev: 'hda'), include(dev: 'hdb')) end end end end context 'clock_timers' do it 'should merge clock_timers' do one.clock_timer(:name => 'rtc', :tickpolicy => 'catchup') two.clock_timer(:name => 'hpet', :present => 'no') expect(subject.clock_timers).to include(include(name: 'rtc'), include(name: 'hpet')) end end end end ����������������������vagrant-libvirt-0.7.0/spec/unit/driver_spec.rb������������������������������������������������������0000664�0000000�0000000�00000023571�14142325265�0021410�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# frozen_string_literal: true require 'fog/libvirt/requests/compute/dhcp_leases' require 'spec_helper' require 'support/binding_proc' require 'support/sharedcontext' require 'vagrant-libvirt/driver' describe VagrantPlugins::ProviderLibvirt::Driver do include_context 'unit' include_context 'libvirt' subject { described_class.new(machine) } let(:vagrantfile) do <<-EOF Vagrant.configure('2') do |config| config.vm.define :test1 do |node| node.vm.provider :libvirt do |domain| domain.uri = "qemu+ssh://user@remote1/system" end end config.vm.define :test2 do |node| node.vm.provider :libvirt do |domain| domain.uri = "qemu+ssh://vms@remote2/system" end end end EOF end # need to override the default package iso_env as using a different # name for the test machines above. let(:machine) { iso_env.machine(:test1, :libvirt) } let(:machine2) { iso_env.machine(:test2, :libvirt) } let(:connection1) { double("connection 1") } let(:connection2) { double("connection 2") } let(:system_connection1) { double("system connection 1") } let(:system_connection2) { double("system connection 2") } # make it easier for distros that want to switch the default value for # qemu_use_session to true by ensuring it is explicitly false for tests. before do allow(machine.provider_config).to receive(:qemu_use_session).and_return(false) allow(logger).to receive(:info) allow(logger).to receive(:debug) end describe '#connection' do it 'should configure a separate connection per machine' do expect(Fog::Compute).to receive(:new).with( hash_including({:libvirt_uri => 'qemu+ssh://user@remote1/system'})).and_return(connection1) expect(Fog::Compute).to receive(:new).with( hash_including({:libvirt_uri => 'qemu+ssh://vms@remote2/system'})).and_return(connection2) expect(machine.provider.driver.connection).to eq(connection1) expect(machine2.provider.driver.connection).to eq(connection2) end it 'should configure the connection once' do expect(Fog::Compute).to receive(:new).once().and_return(connection1) expect(machine.provider.driver.connection).to eq(connection1) expect(machine.provider.driver.connection).to eq(connection1) expect(machine.provider.driver.connection).to eq(connection1) end end describe '#system_connection' do # note that the urls for the two tests are currently # incorrect here as they should be the following: # qemu+ssh://user@remote1/system # qemu+ssh://vms@remote2/system # # In that the system uri should be resolved based on # the provider uri so that for: # uri => qemu+ssh://user@remote1/session # system_uri should be 'qemu+ssh://user@remote1/system' # and not 'qemu:///system'. 
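  # The examples below only stub the connection: Libvirt.open_read_only is
  # set up to return plain doubles, so no real libvirt socket is opened.
  # They assert that a read-only handle is created per machine and that
  # repeated calls to system_connection reuse the already-created handle.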
it 'should configure a separate connection per machine' do expect(Libvirt).to receive(:open_read_only).with('qemu+ssh://user@remote1/system').and_return(system_connection1) expect(Libvirt).to receive(:open_read_only).with('qemu+ssh://vms@remote2/system').and_return(system_connection2) expect(machine.provider.driver.system_connection).to eq(system_connection1) expect(machine2.provider.driver.system_connection).to eq(system_connection2) end it 'should configure the connection once' do expect(Libvirt).to receive(:open_read_only).with('qemu+ssh://user@remote1/system').and_return(system_connection1) expect(machine.provider.driver.system_connection).to eq(system_connection1) expect(machine.provider.driver.system_connection).to eq(system_connection1) expect(machine.provider.driver.system_connection).to eq(system_connection1) end end describe '#get_ipaddress' do context 'when domain exists' do # not used yet, but this is the form that is returned from addresses let(:addresses) { { :public => ["192.168.122.111"], :private => ["192.168.122.111"], } } before do allow(subject).to receive(:get_domain).and_return(domain) end it 'should retrieve the address via domain fog-libvirt API' do # ideally should be able to yield a block to wait_for and check that # the 'addresses' function on the domain is called correctly. expect(domain).to receive(:wait_for).and_return(nil) expect(subject.get_ipaddress(machine)).to eq(nil) end context 'when qemu_use_agent is enabled' do let(:qemu_agent_interfaces) { <<-EOF { "return": [ { "name": "lo", "ip-addresses": [ { "ip-address-type": "ipv4", "ip-address": "127.0.0.1", "prefix": 8 } ], "hardware-address": "00:00:00:00:00:00" }, { "name": "eth0", "ip-addresses": [ { "ip-address-type": "ipv4", "ip-address": "192.168.122.42", "prefix": 24 } ], "hardware-address": "52:54:00:f8:67:98" } ] } EOF } before do allow(machine.provider_config).to receive(:qemu_use_agent).and_return(true) end it 'should retrieve the address via the agent' do expect(subject).to receive(:connection).and_return(connection) expect(libvirt_client).to receive(:lookup_domain_by_uuid).and_return(libvirt_domain) expect(libvirt_domain).to receive(:qemu_agent_command).and_return(qemu_agent_interfaces) expect(domain).to receive(:mac).and_return("52:54:00:f8:67:98").exactly(2).times expect(subject.get_ipaddress(machine)).to eq("192.168.122.42") end context 'when qemu_use_session is enabled' do before do allow(machine.provider_config).to receive(:qemu_use_session).and_return(true) end it 'should still retrieve the address via the agent' do expect(subject).to receive(:connection).and_return(connection) expect(libvirt_client).to receive(:lookup_domain_by_uuid).and_return(libvirt_domain) expect(libvirt_domain).to receive(:qemu_agent_command).and_return(qemu_agent_interfaces) expect(domain).to receive(:mac).and_return("52:54:00:f8:67:98").exactly(2).times expect(subject.get_ipaddress(machine)).to eq("192.168.122.42") end end end context 'when qemu_use_session is enabled' do let(:networks) { [instance_double('::Fog::Libvirt::Compute::Real')] } let(:dhcp_leases) { { "iface" =>"virbr0", "expirytime" =>1636287162, "type" =>0, "mac" =>"52:54:00:8b:dc:5f", "ipaddr" =>"192.168.122.43", "prefix" =>24, "hostname" =>"vagrant-default_test", "clientid" =>"ff:00:8b:dc:5f:00:01:00:01:29:1a:65:42:52:54:00:8b:dc:5f", } } before do allow(machine.provider_config).to receive(:qemu_use_session).and_return(true) end it 'should retreive the address via the system dhcp-leases API' do expect(domain).to 
receive(:mac).and_return("52:54:00:8b:dc:5f") expect(subject).to receive(:system_connection).and_return(system_connection1) expect(system_connection1).to receive(:list_all_networks).and_return(networks) expect(networks[0]).to receive(:dhcp_leases).and_return([dhcp_leases]) expect(subject.get_ipaddress(machine)).to eq("192.168.122.43") end context 'when qemu_use_agent is enabled' do before do allow(machine.provider_config).to receive(:qemu_use_agent).and_return(true) end it 'should retrieve the address via the agent' do expect(subject).to receive(:get_ipaddress_from_qemu_agent).and_return("192.168.122.44") expect(subject.get_ipaddress(machine)).to eq("192.168.122.44") end end end end end describe '#state' do let(:domain) { double('domain') } before do allow(subject).to receive(:get_domain).and_return(domain) end [ [ 'not found', :not_created, { :setup => ProcWithBinding.new do expect(subject).to receive(:get_domain).and_return(nil) end, } ], [ 'libvirt error', :not_created, { :setup => ProcWithBinding.new do expect(subject).to receive(:get_domain).and_raise(Libvirt::RetrieveError, 'missing') end, } ], [ 'terminated', :not_created, { :setup => ProcWithBinding.new do expect(domain).to receive(:state).and_return('terminated') end, } ], [ 'no IP returned', :inaccessible, { :setup => ProcWithBinding.new do expect(domain).to receive(:state).and_return('running').twice() expect(subject).to receive(:get_domain_ipaddress).and_raise(Fog::Errors::TimeoutError) end, } ], [ 'running', :running, { :setup => ProcWithBinding.new do expect(domain).to receive(:state).and_return('running').twice() expect(subject).to receive(:get_domain_ipaddress).and_return('192.168.121.2') end, } ], ].each do |name, expected, options| opts = {} opts.merge!(options) if options it "should handle '#{name}' by returning '#{expected}'" do if !opts[:setup].nil? opts[:setup].apply_binding(binding) end expect(subject.state(machine)).to eq(expected) end end end end ���������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/templates/����������������������������������������������������������0000775�0000000�0000000�00000000000�14142325265�0020544�5����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������vagrant-libvirt-0.7.0/spec/unit/templates/domain_all_settings.xml�����������������������������������0000664�0000000�0000000�00000012164�14142325265�0025311�0����������������������������������������������������������������������������������������������������ustar�00root����������������������������root����������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<domain type='kvm' xmlns:qemu='http://libvirt.org/schemas/domain/qemu/1.0'> <name></name> <title>title description 1 qemu64 1024 hvm /efi/loader /usr/bin/kvm-spice /dev/random
vagrant-libvirt-0.7.0/spec/unit/templates/domain_custom_cpu_model.xml000066400000000000000000000015321414232526500261570ustar00rootroot00000000000000 1 SandyBridge hvm vagrant-libvirt-0.7.0/spec/unit/templates/domain_defaults.xml000066400000000000000000000015231414232526500244250ustar00rootroot00000000000000 1 hvm vagrant-libvirt-0.7.0/spec/unit/templates/domain_spec.rb000066400000000000000000000155521414232526500233620ustar00rootroot00000000000000# frozen_string_literal: true require 'support/sharedcontext' require 'vagrant-libvirt/config' require 'vagrant-libvirt/util/erb_template' describe 'templates/domain' do include_context 'unit' class DomainTemplateHelper < VagrantPlugins::ProviderLibvirt::Config include VagrantPlugins::ProviderLibvirt::Util::ErbTemplate attr_accessor :domain_volumes def initialize super @domain_volumes = [] end def finalize! super end end let(:domain) { DomainTemplateHelper.new } let(:xml_expected) { File.read(File.join(File.dirname(__FILE__), test_file)) } context 'when only defaults used' do let(:test_file) { 'domain_defaults.xml' } it 'renders template' do domain.finalize! expect(domain.to_xml('domain')).to eq xml_expected end end context 'when all settings enabled' do before do domain.title = 'title' domain.description = 'description' domain.instance_variable_set('@domain_type', 'kvm') domain.cpu_mode = 'custom' domain.cpu_feature(name: 'AAA', policy: 'required') domain.hyperv_feature(name: 'BBB', state: 'on') domain.clock_offset = 'variable' domain.clock_timer(name: 't1') domain.clock_timer(name: 't2', track: 'b', tickpolicy: 'c', frequency: 'd', mode: 'e', present: 'yes') domain.hyperv_feature(name: 'spinlocks', state: 'on', retries: '4096') domain.cputopology(sockets: '1', cores: '3', threads: '2') domain.machine_type = 'pc-compatible' domain.machine_arch = 'x86_64' domain.loader = '/efi/loader' domain.boot('network') domain.boot('cdrom') domain.boot('hd') domain.emulator_path = '/usr/bin/kvm-spice' domain.instance_variable_set('@domain_volume_cache', 'deprecated') domain.disk_bus = 'ide' domain.disk_device = 'vda' domain.disk_driver(:cache => 'unsafe', :io => 'threads', :copy_on_read => 'on', :discard => 'unmap', :detect_zeroes => 'on') domain.domain_volumes.push({ :dev => 1.vdev.to_s, :cache => 'unsafe', :bus => domain.disk_bus, :path => '/var/lib/libvirt/images/test.qcow2' }) domain.domain_volumes.push({ :dev => 2.vdev.to_s, :cache => 'unsafe', :bus => domain.disk_bus, :path => '/var/lib/libvirt/images/test2.qcow2' }) domain.storage(:file, path: 'test-disk1.qcow2') domain.storage(:file, path: 'test-disk2.qcow2', io: 'threads', copy_on_read: 'on', discard: 'unmap', detect_zeroes: 'on') domain.disks.each do |disk| disk[:absolute_path] = '/var/lib/libvirt/images/' + disk[:path] end domain.storage(:file, device: :cdrom) domain.storage(:file, device: :cdrom) domain.channel(type: 'unix', target_name: 'org.qemu.guest_agent.0', target_type: 'virtio') domain.channel(type: 'spicevmc', target_name: 'com.redhat.spice.0', target_type: 'virtio') domain.channel(type: 'unix', target_type: 'guestfwd', target_address: '192.0.2.42', target_port: '4242', source_path: '/tmp/foo') domain.random(model: 'random') domain.serial(:type => 'file', :source => {:path => '/var/log/vm_consoles/machine.log'}) domain.pci(bus: '0x06', slot: '0x12', function: '0x5') domain.pci(domain: '0x0001', bus: '0x03', slot: '0x00', function: '0x0') domain.usb_controller(model: 'nec-xhci', ports: '4') domain.usb(bus: '1', device: '2', vendor: '0x1234', product: '0xabcd') domain.redirdev(type: 'tcp', 
host: 'localhost', port: '4000') domain.redirfilter(class: '0x0b', vendor: '0x08e6', product: '0x3437', version: '2.00', allow: 'yes') domain.watchdog(model: 'i6300esb', action: 'reset') domain.smartcard(mode: 'passthrough') domain.tpm_path = '/dev/tpm0' domain.qemuargs(value: '-device') domain.qemuargs(value: 'dummy-device') domain.qemuenv(QEMU_AUDIO_DRV: 'pa') domain.qemuenv(QEMU_AUDIO_TIMER_PERIOD: '150') domain.qemuenv(QEMU_PA_SAMPLES: '1024') domain.qemuenv(QEMU_PA_SERVER: '/run/user/1000/pulse/native') domain.shares = '1024' domain.cpuset = '1-4,^3,6' domain.nodeset = '1-4,^3,6' domain.video_accel3d = true end let(:test_file) { 'domain_all_settings.xml' } it 'renders template' do domain.finalize! expect(domain.to_xml('domain')).to eq xml_expected end end context 'when custom cpu model enabled' do before do domain.cpu_mode = 'custom' domain.cpu_model = 'SandyBridge' end let(:test_file) { 'domain_custom_cpu_model.xml' } it 'renders template' do domain.finalize! expect(domain.to_xml('domain')).to eq xml_expected end end context 'when tpm 2.0 device is specified' do before do domain.tpm_version = '2.0' domain.tpm_type = 'emulator' domain.tpm_model = 'tpm-crb' end let(:test_file) { 'tpm/version_2.0.xml' } it 'renders template' do domain.finalize! expect(domain.to_xml('domain')).to eq xml_expected end end context 'when tpm 1.2 device is implicitly used' do before do domain.tpm_path = '/dev/tpm0' end let(:test_file) { 'tpm/version_1.2.xml' } it 'renders template' do domain.finalize! expect(domain.to_xml('domain')).to eq xml_expected end end context 'memballoon' do context 'default' do it 'renders without specifying the xml tag' do domain.finalize! expect(domain.to_xml('domain')).to_not match(/memballoon/) end end context 'memballon enabled' do before do domain.memballoon_enabled = true end it 'renders with memballon element' do domain.finalize! expect(domain.to_xml('domain')).to match(//) expect(domain.to_xml('domain')).to match(/
/) end context 'all settings specified' do before do domain.memballoon_model = "virtio-non-transitional" domain.memballoon_pci_bus = "0x01" domain.memballoon_pci_slot = "0x05" end it 'renders with specified values' do domain.finalize! expect(domain.to_xml('domain')).to match(//) expect(domain.to_xml('domain')).to match(/
/) end end end context 'memballon disabled' do before do domain.memballoon_enabled = false end it 'renders the memballoon element with model none' do domain.finalize! expect(domain.to_xml('domain')).to match(//) end end end end vagrant-libvirt-0.7.0/spec/unit/templates/tpm/000077500000000000000000000000001414232526500213445ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/unit/templates/tpm/version_1.2.xml000066400000000000000000000017171414232526500241410ustar00rootroot00000000000000 1 hvm vagrant-libvirt-0.7.0/spec/unit/templates/tpm/version_2.0.xml000066400000000000000000000016671414232526500241440ustar00rootroot00000000000000 1 hvm vagrant-libvirt-0.7.0/spec/unit/util/000077500000000000000000000000001414232526500175235ustar00rootroot00000000000000vagrant-libvirt-0.7.0/spec/unit/util/byte_number_spec.rb000066400000000000000000000012621414232526500233760ustar00rootroot00000000000000# frozen_string_literal: true require 'spec_helper' require 'vagrant-libvirt/util/byte_number' describe ByteNumber do describe '#ByteNumber to Gigrabyte' do it 'should return bigger size' do expect( ByteNumber.new("10737423360").to_GB).to eq(11) expect( ByteNumber.new("737423360").to_GB).to eq(1) expect( ByteNumber.new("110737423360").to_GB).to eq(104) end end describe '#ByteNumber from Gigrabyte' do it 'should convert' do expect( ByteNumber.from_GB(5).to_i).to eq(5368709120) end end describe '#ByteNumber pow' do it 'should be work like interger' do expect( ByteNumber.new(5).pow(5).to_i).to eq(5**5) end end endvagrant-libvirt-0.7.0/tests/000077500000000000000000000000001414232526500157775ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/cpus/000077500000000000000000000000001414232526500167515ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/cpus/Vagrantfile000066400000000000000000000004661414232526500211440ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore" config.ssh.shell = "/bin/sh" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provider :libvirt do |libvirt| libvirt.cpus = 2 end end vagrant-libvirt-0.7.0/tests/default_prefix/000077500000000000000000000000001414232526500210005ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/default_prefix/Vagrantfile000066400000000000000000000005271414232526500231710ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore" config.ssh.shell = "/bin/sh" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provider :libvirt do |libvirt| libvirt.default_prefix = "changed_default_prefix" end end vagrant-libvirt-0.7.0/tests/memory/000077500000000000000000000000001414232526500173075ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/memory/Vagrantfile000066400000000000000000000004731414232526500215000ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore" config.ssh.shell = "/bin/sh" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provider :libvirt do |libvirt| libvirt.memory = 1000 end end 
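The cpus and memory fixtures above each toggle a single provider option. For reference, a combined Vagrantfile along the same lines would look like the sketch below; it is only an illustration assembled from those fixtures, not one of the committed test directories.

# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# frozen_string_literal: true

# Illustrative sketch only: combines the cpus and memory fixtures above.
Vagrant.configure("2") do |config|
  config.vm.box = "infernix/tinycore"
  config.ssh.shell = "/bin/sh"
  config.vm.synced_folder ".", "/vagrant", disabled: true

  config.vm.provider :libvirt do |libvirt|
    libvirt.cpus = 2      # as in tests/cpus/Vagrantfile
    libvirt.memory = 1000 # as in tests/memory/Vagrantfile
  end
end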
vagrant-libvirt-0.7.0/tests/package_complex_example/000077500000000000000000000000001414232526500226345ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/package_complex_example/Vagrantfile000066400000000000000000000010451414232526500250210ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "generic/debian10" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provider :libvirt do |libvirt| libvirt.driver = "qemu" libvirt.cpus = 2 libvirt.memory = 2048 end # note by default packaging the resulting machine will bundle the generated # ssh key with the resulting box, to disable this behaviour need to # uncomment the following line. #config.ssh.insert_key = false end vagrant-libvirt-0.7.0/tests/package_complex_example/Vagrantfile.testbox000066400000000000000000000004711414232526500265120ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : Vagrant.configure("2") do |config| config.vm.box = "test-package-complex-example" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provider :libvirt do |libvirt| libvirt.driver = "qemu" libvirt.cpus = 2 libvirt.memory = 2048 end end vagrant-libvirt-0.7.0/tests/package_complex_example/scripts/000077500000000000000000000000001414232526500243235ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/package_complex_example/scripts/sysprep.sh000066400000000000000000000020021414232526500263560ustar00rootroot00000000000000#!/bin/sh -eux # consider purging any packages you don't need here echo "autoremoving packages and cleaning apt data" apt-get -y autoremove; apt-get -y clean; # repeat what machine-ids does in sysprep as this script needs to run via customize # which has a bug resulting in the machine-ids being regenerated if [ -f /etc/machine-id ] then truncate --size=0 /etc/machine-id fi if [ -f /var/lib/dbus/machine-id ] then truncate --size=0 /run/machine-id fi echo "remove /var/cache" find /var/cache -type f -exec rm -rf {} \; echo "force a new random seed to be generated" rm -f /var/lib/systemd/random-seed # for debian based systems ensure host keys regenerated on boot if [ -e /usr/sbin/dpkg-reconfigure ] then printf "@reboot root command bash -c 'export PATH=$PATH:/usr/sbin ; export DEBIAN_FRONTEND=noninteractive ; export DEBCONF_NONINTERACTIVE_SEEN=true ; /usr/sbin/dpkg-reconfigure openssh-server &>/dev/null ; /bin/systemctl restart ssh.service ; rm --force /etc/cron.d/keys'\n" > /etc/cron.d/keys fi vagrant-libvirt-0.7.0/tests/package_simple/000077500000000000000000000000001414232526500207435ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/package_simple/Vagrantfile000066400000000000000000000004201414232526500231240ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore" config.ssh.shell = "/bin/sh" config.ssh.insert_key = false config.vm.synced_folder ".", "/vagrant", disabled: true end vagrant-libvirt-0.7.0/tests/package_simple/Vagrantfile.testbox000066400000000000000000000003711414232526500246200ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : Vagrant.configure("2") do |config| config.vm.box = "test-package-simple-domain" config.ssh.shell = "/bin/sh" config.ssh.insert_key = false config.vm.synced_folder ".", "/vagrant", disabled: true end 
vagrant-libvirt-0.7.0/tests/parse_tests.awk000066400000000000000000000003411414232526500210350ustar00rootroot00000000000000BEGIN { printf "[" previous="" } match($0, /@test "(.*)" \{/, arr) { if ( previous != "" ) { printf "%s, ",previous } previous = sprintf("\"%s\"", arr[1]) } END { printf "%s",previous print "]" } vagrant-libvirt-0.7.0/tests/private_network/000077500000000000000000000000001414232526500212225ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/private_network/Vagrantfile000066400000000000000000000006151414232526500234110ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| # private network doesnt work with tinycore, use # debian10 box for test config.vm.box = "generic/debian10" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.define :test_vm1 do |test_vm1| test_vm1.vm.network :private_network, :ip => "10.20.30.40" end end vagrant-libvirt-0.7.0/tests/qemu_agent/000077500000000000000000000000001414232526500201245ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/qemu_agent/Vagrantfile000066400000000000000000000006131414232526500223110ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "generic/debian10" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provider :libvirt do |libvirt| libvirt.channel :type => 'unix', :target_name => 'org.qemu.guest_agent.0', :target_type => 'virtio' libvirt.qemu_use_agent = true end end vagrant-libvirt-0.7.0/tests/runtests.bats000066400000000000000000000126151414232526500205460ustar00rootroot00000000000000SCRIPT_DIR="$( cd "$BATS_TEST_DIRNAME" &> /dev/null && pwd )" export PATH=$(dirname ${SCRIPT_DIR})/bin:${PATH} VAGRANT_CMD="vagrant" VAGRANT_OPT="--provider=libvirt" TEMPDIR= setup_file() { # set VAGRANT_HOME to something else to reuse for tests to avoid clashes with # user installed plugins when running tests locally. if [ -z "${VAGRANT_HOME:-}" ] then TEMPDIR=$(mktemp -d 2>/dev/null) export VAGRANT_HOME=${TEMPDIR}/.vagrant.d echo "# Using ${VAGRANT_HOME} for VAGRANT_HOME" >&3 fi } teardown_file() { if [ -n "${TEMPDIR:-}" ] && [ -d "${TEMPDIR:-}" ] then rm -rf ${TEMPDIR:-} fi } cleanup() { ${VAGRANT_CMD} destroy -f if [ $? 
== "0" ]; then return 0 else return 1 fi } @test "destroy simple vm" { export VAGRANT_LOG=debug export VAGRANT_CWD=tests/simple run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] cleanup } @test "simple vm provision via shell" { export VAGRANT_CWD=tests/simple_provision_shell cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "status = ${status}" echo "${output}" [ "$status" -eq 0 ] [ $(expr "$output" : ".*Hello.*") -ne 0 ] echo "${output}" cleanup } @test "bring up with custom default prefix" { export VAGRANT_CWD=tests/default_prefix cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} [ "$status" -eq 0 ] echo "${output}" echo "status = ${status}" [ $(expr "$output" : ".*changed_default_prefixdefault.*") -ne 0 ] echo "${output}" cleanup } @test "bring up with second disk" { export VAGRANT_CWD=tests/second_disk cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] echo "${output}" [ $(expr "$output" : ".*second_disk_default-vdb.*") -ne 0 ] cleanup } @test "bring up with two disks" { export VAGRANT_CWD=tests/two_disks cleanup tools/create_box_with_two_disks.sh ${VAGRANT_HOME} ${VAGRANT_CMD} run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] echo "${output}" [ $(expr "$output" : ".*Image.*2G") -ne 0 ] [ $(expr "$output" : ".*Image.*10G") -ne 0 ] cleanup } @test "bring up with adjusted memory settings" { export VAGRANT_CWD=tests/memory cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] echo "${output}" [ $(expr "$output" : ".*Memory.*1000M.*") -ne 0 ] cleanup } @test "bring up with adjusted cpu settings" { export VAGRANT_CWD=tests/cpus cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] echo "${output}" [ $(expr "$output" : ".*Cpus.*2.*") -ne 0 ] cleanup } @test "bring up and use qemu agent for connectivity" { export VAGRANT_CWD=tests/qemu_agent cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] echo "${output}" cleanup } @test "ip is reachable with private network" { export VAGRANT_CWD=tests/private_network cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] echo "${output}" [ $(expr "$output" : ".*Cpus.*2.*") -ne 0 ] run fping 10.20.30.40 [ "$status" -eq 0 ] echo "${output}" [ $(expr "$output" : ".*alive.*") -ne 0 ] cleanup } @test "package simple domain" { export VAGRANT_CWD=tests/package_simple cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] rm -f package.box run ${VAGRANT_CMD} package echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] run ${VAGRANT_CMD} destroy -f echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] run ${VAGRANT_CMD} box add --force package.box --name test-package-simple-domain echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] VAGRANT_VAGRANTFILE=Vagrantfile.testbox run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] run ${VAGRANT_CMD} box remove --force test-package-simple-domain echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] rm -f package.box cleanup } @test "package complex example" { export VAGRANT_CWD=tests/package_complex_example # this will allow the host keys to be removed, and part of the sysprep script # adds a step to trigger the 
regeneration. export VAGRANT_LIBVIRT_VIRT_SYSPREP_OPERATIONS='defaults,-ssh-userdir,customize' export VAGRANT_LIBVIRT_VIRT_SYSPREP_OPTIONS="--run $(pwd)/tests/package_complex_example/scripts/sysprep.sh" cleanup run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] rm -f package.box run ${VAGRANT_CMD} package echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] run ${VAGRANT_CMD} destroy -f echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] run ${VAGRANT_CMD} box add --force package.box --name test-package-complex-example echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] VAGRANT_VAGRANTFILE=Vagrantfile.testbox run ${VAGRANT_CMD} up ${VAGRANT_OPT} echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] run ${VAGRANT_CMD} box remove --force test-package-complex-example echo "${output}" echo "status = ${status}" [ "$status" -eq 0 ] rm -f package.box cleanup } vagrant-libvirt-0.7.0/tests/second_disk/000077500000000000000000000000001414232526500202645ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/second_disk/Vagrantfile000066400000000000000000000005131414232526500224500ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore" config.ssh.shell = "/bin/sh" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provider :libvirt do |libvirt| libvirt.storage :file, :size => '1G' end end vagrant-libvirt-0.7.0/tests/simple/000077500000000000000000000000001414232526500172705ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/simple/Vagrantfile000066400000000000000000000003601414232526500214540ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore" config.ssh.shell = "/bin/sh" config.vm.synced_folder ".", "/vagrant", disabled: true end vagrant-libvirt-0.7.0/tests/simple_provision_shell/000077500000000000000000000000001414232526500225675ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/simple_provision_shell/Vagrantfile000066400000000000000000000004761414232526500247630ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore" config.ssh.shell = "/bin/sh" config.vm.synced_folder ".", "/vagrant", disabled: true config.vm.provision "shell", inline: "echo Hello, World", privileged: false end vagrant-libvirt-0.7.0/tests/two_disks/000077500000000000000000000000001414232526500200055ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tests/two_disks/Vagrantfile000066400000000000000000000004351414232526500221740ustar00rootroot00000000000000# -*- mode: ruby -*- # vi: set ft=ruby : # # frozen_string_literal: true Vagrant.configure("2") do |config| config.vm.box = "infernix/tinycore-two-disks" config.vm.box_version = "0.0.2" config.ssh.shell = "/bin/sh" config.vm.synced_folder ".", "/vagrant", disabled: true end vagrant-libvirt-0.7.0/tools/000077500000000000000000000000001414232526500157755ustar00rootroot00000000000000vagrant-libvirt-0.7.0/tools/create_box.sh000077500000000000000000000062751414232526500204610ustar00rootroot00000000000000#!/usr/bin/env bash #set -xu error() { local msg="${1}" echo "==> ERROR: ${msg}" exit 1 } usage() { echo "Usage: ${0} IMAGE [BOX] [Vagrantfile.add]" echo echo "Package a qcow2 image into a vagrant-libvirt reusable box" echo 
"" echo "If packaging from a Vagrant machine ensure 'config.ssh.insert_key = false' was " echo "set in the original Vagrantfile to avoid removal of the default ssh key, " echo "otherwise vagrant will not be able to connect to machines created from this box" } # Print the image's backing file backing(){ local img=${1} qemu-img info "$img" | grep 'backing file:' | cut -d ':' -f2 } # Rebase the image rebase(){ local img=${1} qemu-img rebase -p -b "" "$img" [[ "$?" -ne 0 ]] && error "Error during rebase" } # Is absolute path isabspath(){ local path=${1} [[ "$path" =~ ^/.* ]] } if [ -z "$1" ] || [ "$1" = "-h" ] || [ "$1" = "--help" ]; then usage exit 1 fi IMG=$(readlink -e "$1") [[ "$?" -ne 0 ]] && error "'$1': No such image" IMG_DIR=$(dirname "$IMG") IMG_BASENAME=$(basename "$IMG") BOX=${2:-} # If no box name is supplied infer one from image name if [[ -z "$BOX" ]]; then BOX_NAME=${IMG_BASENAME%.*} BOX=$BOX_NAME.box else BOX_NAME=$(basename "${BOX%.*}") fi [[ -f "$BOX" ]] && error "'$BOX': Already exists" CWD=$(pwd) TMP_DIR="$CWD/_tmp_package" TMP_IMG="$TMP_DIR/box.img" mkdir -p "$TMP_DIR" [[ ! -r "$IMG" ]] && error "'$IMG': Permission denied" if [ -n "$3" ] && [ -r "$3" ]; then VAGRANTFILE_ADD="$(cat $3)" fi # We move / copy (when the image has master) the image to the tempdir # ensure that it's moved back / removed again if [[ -n $(backing "$IMG") ]]; then echo "==> Image has backing image, copying image and rebasing ..." trap "rm -rf $TMP_DIR" EXIT cp "$IMG" "$TMP_IMG" rebase "$TMP_IMG" else if fuser -s "$IMG"; then error "Image '$IMG_BASENAME' is used by another process" fi # move the image to get a speed-up and use less space on disk trap 'mv "$TMP_IMG" "$IMG"; rm -rf "$TMP_DIR"' EXIT mv "$IMG" "$TMP_IMG" fi cd "$TMP_DIR" #Using the awk int function here to truncate the virtual image size to an #integer since the fog-libvirt library does not seem to properly handle #floating point. IMG_SIZE=$(qemu-img info --output=json "$TMP_IMG" | awk '/virtual-size/{s=int($2)/(1024^3); print (s == int(s)) ? s : int(s)+1 }') echo "{$IMG_SIZE}" cat > metadata.json < Vagrantfile < Creating box, tarring and gzipping" if type pigz >/dev/null 2>/dev/null; then GZ="pigz" else GZ="gzip" fi tar cv -S --totals ./metadata.json ./Vagrantfile ./box.img | $GZ -c > "$BOX" # if box is in tmpdir move it to CWD before removing tmpdir if ! isabspath "$BOX"; then mv "$BOX" "$CWD" fi echo "==> ${BOX} created" echo "==> You can now add the box:" echo "==> 'vagrant box add ${BOX} --name ${BOX_NAME}'" vagrant-libvirt-0.7.0/tools/create_box_with_two_disks.sh000077500000000000000000000017621414232526500235760ustar00rootroot00000000000000#!/bin/bash set -eu -o pipefail VAGRANT_HOME=${1:-$HOME/.vagrant.d/} VAGRANT_CMD=${2:-vagrant} echo 'Create box with two disks' ${VAGRANT_CMD} box list if [ "$(${VAGRANT_CMD} box list | grep -c -E '^infernix/tinycore-two-disks\s')" -eq 0 ] then ${VAGRANT_CMD} box list if [ "$(${VAGRANT_CMD} box list | grep -c -E '^infernix/tinycore\s')" -eq 0 ] then ${VAGRANT_CMD} box add infernix/tinycore fi NEW_PATH="${VAGRANT_HOME}/boxes/infernix-VAGRANTSLASH-tinycore-two-disks" cp -r "${VAGRANT_HOME}/boxes/infernix-VAGRANTSLASH-tinycore" "${NEW_PATH}" BOX_VERSION="$(${VAGRANT_CMD} box list --machine-readable | grep -A 10 infernix/tinycore-two-disks | grep box-version | head -n 1 | cut -d, -f4)" qemu-img create -f qcow2 "${NEW_PATH}/${BOX_VERSION}/libvirt/disk2.qcow2" 10G cat > "${NEW_PATH}/${BOX_VERSION}/libvirt/metadata.json" <" echo "Hostname should be in format vagrant-[os-name], e.g. vagrant-redhat63." 
exit 1 fi # On which version of Red Hat are we running? RHEL_MAJOR_VERSION=$(sed 's/.*release \([0-9]\)\..*/\1/' /etc/redhat-release) if [ $? -ne 0 ]; then echo "Is this a Red Hat distro?" exit 1 fi echo "* Found Red Hat ${RHEL_MAJOR_VERSION} version." # Setup hostname vagrant-something. FQDN="$1.vagrantup.com" if grep '^HOSTNAME=' /etc/sysconfig/network > /dev/null; then sed -i 's/HOSTNAME=\(.*\)/HOSTNAME='${FQDN}'/' /etc/sysconfig/network else echo "HOSTNAME=${FQDN}" >> /etc/sysconfig/network fi # Enable EPEL repository. yum -y install wget cd ~root if [ $RHEL_MAJOR_VERSION -eq 5 ]; then wget http://ftp.astral.ro/mirrors/fedora/pub/epel/5/i386/epel-release-5-4.noarch.rpm EPEL_PKG="epel-release-5-4.noarch.rpm" else wget http://ftp.astral.ro/mirrors/fedora/pub/epel/6/i386/epel-release-6-8.noarch.rpm EPEL_PKG="epel-release-6-8.noarch.rpm" fi rpm -i ~root/${EPEL_PKG} rm -f ~root/${EPEL_PKG} # Install some required software. yum -y install openssh-server openssh-clients sudo \ ruby ruby-devel make gcc rubygems rsync chkconfig sshd on # Users, groups, passwords and sudoers. echo 'vagrant' | passwd --stdin root grep 'vagrant' /etc/passwd > /dev/null if [ $? -ne 0 ]; then echo '* Creating user vagrant.' useradd vagrant echo 'vagrant' | passwd --stdin vagrant fi grep '^admin:' /etc/group > /dev/null || groupadd admin usermod -G admin vagrant echo 'Defaults env_keep += "SSH_AUTH_SOCK"' >> /etc/sudoers echo '%admin ALL=NOPASSWD: ALL' >> /etc/sudoers sed -i 's/Defaults\s*requiretty/Defaults !requiretty/' /etc/sudoers # SSH setup # Add Vagrant ssh key for root and vagrant accouts. sed -i 's/.*UseDNS.*/UseDNS no/' /etc/ssh/sshd_config [ -d ~root/.ssh ] || mkdir ~root/.ssh chmod 700 ~root/.ssh cat > ~root/.ssh/authorized_keys << EOF ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key EOF chmod 600 ~root/.ssh/authorized_keys [ -d ~vagrant/.ssh ] || mkdir ~vagrant/.ssh chmod 700 ~vagrant/.ssh cat > ~vagrant/.ssh/authorized_keys << EOF ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key EOF chmod 600 ~vagrant/.ssh/authorized_keys # Disable firewall and switch SELinux to permissive mode. chkconfig iptables off chkconfig ip6tables off # Networking setup.. # Don't fix ethX names to hw address. rm -f /etc/udev/rules.d/*persistent-net.rules rm -f /etc/udev/rules.d/*-net.rules rm -fr /var/lib/dhclient/* # Interface eth0 should get IP address via dhcp. cat > /etc/sysconfig/network-scripts/ifcfg-eth0 << EOF DEVICE="eth0" BOOTPROTO="dhcp" ONBOOT="yes" NM_CONTROLLED="no" EOF # Do some cleanup.. 
rm -f ~root/.bash_history rm -r "$(gem env gemdir)"/doc/* yum clean all halt vagrant-libvirt-0.7.0/vagrant-libvirt.gemspec000066400000000000000000000030161414232526500213150ustar00rootroot00000000000000# -*- encoding: utf-8 -*- # frozen_string_literal: true require File.expand_path('../lib/vagrant-libvirt/version', __FILE__) Gem::Specification.new do |s| s.authors = ['Lukas Stanek','Dima Vasilets','Brian Pitts','Darragh Bailey'] s.email = ['ls@elostech.cz','pronix.service@gmail.com','brian@polibyte.com','daragh.bailey@gmail.com'] s.license = 'MIT' s.description = %q{libvirt provider for Vagrant.} s.summary = %q{libvirt provider for Vagrant.} s.homepage = VagrantPlugins::ProviderLibvirt::HOMEPAGE s.metadata = { "source_code_uri" => VagrantPlugins::ProviderLibvirt::HOMEPAGE, } s.files = Dir.glob("{lib,locales}/**/*") + %w(LICENSE README.md) s.executables = Dir.glob("bin/*.*").map{ |f| File.basename(f) } s.test_files = Dir.glob("{test,spec,features}/**/*.*") s.name = 'vagrant-libvirt' s.require_paths = ['lib'] s.version = VagrantPlugins::ProviderLibvirt.get_version s.add_development_dependency "rspec-core", ">= 3.5" s.add_development_dependency "rspec-expectations", ">= 3.5" s.add_development_dependency "rspec-mocks", ">= 3.5" s.add_development_dependency "simplecov" s.add_development_dependency "simplecov-lcov" s.add_runtime_dependency 'fog-libvirt', '>= 0.6.0' s.add_runtime_dependency 'fog-core', '~> 2.1' s.add_runtime_dependency 'rexml' # Make sure to allow use of the same version as Vagrant by being less specific s.add_runtime_dependency 'nokogiri', '~> 1.6' s.add_development_dependency 'rake' end
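For local development, a plugin gem built from this gemspec can be installed directly into Vagrant. The commands below are a sketch; the exact gem filename depends on the version resolved from lib/vagrant-libvirt/version.rb (0.7.0 for this tree).

gem build vagrant-libvirt.gemspec                    # builds vagrant-libvirt-0.7.0.gem from this gemspec
vagrant plugin install ./vagrant-libvirt-0.7.0.gem   # install the locally built gem into Vagrant
vagrant plugin list                                  # verify vagrant-libvirt now appears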