Commit c656a01b authored by Chris Bednarski

Merge pull request #2490 from mitchellh/docs-revamp

Reformat docs using htmlbeautifier and pandoc
parents 40f2173c e0be4efe
@@ -3,3 +3,5 @@ source "https://rubygems.org"
ruby "2.2.2"
gem "middleman-hashicorp", github: "hashicorp/middleman-hashicorp"
gem "middleman-breadcrumbs"
gem "htmlbeautifier"
\ No newline at end of file
@@ -69,6 +69,7 @@ GEM
    hitimes (1.2.2)
    hooks (0.4.0)
      uber (~> 0.0.4)
    htmlbeautifier (1.1.0)
    htmlcompressor (0.2.0)
    http_parser.rb (0.6.0)
    i18n (0.7.0)
@@ -92,6 +93,8 @@ GEM
      middleman-sprockets (>= 3.1.2)
      sass (>= 3.4.0, < 4.0)
      uglifier (~> 2.5)
    middleman-breadcrumbs (0.1.0)
      middleman (>= 3.3.5)
    middleman-core (3.3.12)
      activesupport (~> 4.1.0)
      bundler (~> 1.1)
@@ -179,4 +182,6 @@ PLATFORMS
  ruby

DEPENDENCIES
  htmlbeautifier
  middleman-breadcrumbs
  middleman-hashicorp!
@@ -8,3 +8,10 @@ dev: init
build: init
	PACKER_DISABLE_DOWNLOAD_FETCH=true PACKER_VERSION=1.0 bundle exec middleman build

format:
	bundle exec htmlbeautifier -t 2 source/*.erb
	bundle exec htmlbeautifier -t 2 source/layouts/*.erb
	@pandoc -v > /dev/null || echo "pandoc must be installed in order to format markdown content"
	pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "pandoc -r markdown -w markdown --tab-stop=4 --atx-headers -s --columns=80 {} > {}.new"\; || true
	pandoc -v > /dev/null && find . -iname "*.html.markdown" | xargs -I{} bash -c "mv {}.new {}"\; || true
@@ -21,3 +21,13 @@ make dev
Then open up `localhost:4567`. Note that some URLs you may need to append
".html" to make them work (in the navigation and such).
## Keeping Tidy

To keep the source code nicely formatted, there is a `make format` target. This
runs `htmlbeautifier` and `pandoc` to reformat the source code.

    make format

Note that you will need to install pandoc yourself. `make format` will skip the
pandoc step if you don't have it installed.
\ No newline at end of file
@@ -4,6 +4,8 @@
set :base_url, "https://www.packer.io/"

activate :breadcrumbs

activate :hashicorp do |h|
  h.version = ENV["PACKER_VERSION"]
  h.bintray_enabled = ENV["BINTRAY_ENABLED"]
...
@@ -12,45 +12,45 @@ footer {
    margin-left: -20px;
  }

  ul {
    margin-top: 40px;

    @include respond-to(mobile) {
      margin-left: $baseline;
      margin-top: $baseline;
    }

    li {
      display: inline;
      margin-right: 50px;

      @include respond-to(mobile) {
        margin-right: 20px;
        display: list-item;
      }
    }

    .hashi-logo {
      background: image-url('logo_footer.png') no-repeat center top;
      height: 40px;
      width: 40px;
      background-size: 37px 40px;
      text-indent: -999999px;
      display: inline-block;
      margin-top: -10px;
      margin-right: 0;

      @include respond-to(mobile) {
        margin-top: -50px;
        margin-right: $baseline;
      }
    }
  }

  .active {
    color: $green;
  }

  button {
    margin-top: 20px;
  }
}

.page-wrap {
...
@@ -70,17 +70,17 @@ $mono: 'Inconsolata', 'courier new', courier, mono-space;
  background-color: #000;
  color: $white;

  a {
    color: inherit;

    &:hover {
      color: $green;
    }

    &:active {
      color: darken($green, 30%);
    }
  }
}

.white-background {
@@ -102,9 +102,9 @@ $mono: 'Inconsolata', 'courier new', courier, mono-space;
  color: $orange;
  font-size: 20px;

  a:hover, a:active, a:visited {
    color: inherit;
  }
}

// media queries
@@ -170,13 +170,13 @@ $break-lg: 980px;
@mixin transform-scale($value) {
  -webkit-transform: scale($value);
  -moz-transform: scale($value);
  transform: scale($value);
}

@mixin transition($type, $speed, $easing) {
  -webkit-transition: $type $speed $easing;
  -moz-transition: $type $speed $easing;
  -o-transition: $type $speed $easing;
  transition: $type $speed $easing;
}
...
@@ -14,10 +14,10 @@ form, input, textarea, button {
  line-height: 1.0;
  color: inherit;

  &:focus {
    line-height: 1.0;
    box-shadow: none !important;
    outline: none;
    vertical-align: middle;
  }
}
---
description: |
    Packer is a new project with a growing community. Despite this, there are
    dedicated users willing to help through various mediums.
layout: community
page_title: Community
...

# Community

Packer is a new project with a growing community. Despite this, there are
dedicated users willing to help through various mediums.

**IRC:** `#packer-tool` on Freenode.

**Mailing List:** [Packer Google Group](http://groups.google.com/group/packer-tool)

**Bug Tracker:** [Issue tracker on GitHub](https://github.com/mitchellh/packer/issues).
Please only use this for reporting bugs. Do not ask for general help here. Use
IRC or the mailing list for that.
## People

@@ -25,62 +28,82 @@ to Packer in some core way. Over time, faces may appear and disappear from this
list as contributors come and go.
<div class="people">
  <div class="person">
    <img class="pull-left" src="http://www.gravatar.com/avatar/54079122b67de9677c1f93933ce8b63a.png?s=125">
    <div class="bio">
      <h3>Mitchell Hashimoto (<a href="https://github.com/mitchellh">@mitchellh</a>)</h3>
      <p>
        Mitchell Hashimoto is the creator of Packer. He developed the
        core of Packer as well as the Amazon, VirtualBox, and VMware
        builders. In addition to Packer, Mitchell is the creator of
        <a href="http://www.vagrantup.com">Vagrant</a>. He is self
        described as "automation obsessed."
      </p>
    </div>
  </div>
  <div class="person">
    <img class="pull-left" src="http://www.gravatar.com/avatar/2acc31dd6370a54b18f6755cd0710ce6.png?s=125">
    <div class="bio">
      <h3>Jack Pearkes (<a href="https://github.com/pearkes">@pearkes</a>)</h3>
      <p>
        <a href="http://jack.ly/">Jack Pearkes</a> created and maintains the DigitalOcean builder
        for Packer. Outside of Packer, Jack is an avid open source
        contributor and software consultant.
      </p>
    </div>
  </div>
  <div class="person">
    <img class="pull-left" src="http://www.gravatar.com/avatar/2f7fc9cb7558e3ea48f5a86fa90a78da.png?s=125">
    <div class="bio">
      <h3>Mark Peek (<a href="https://github.com/markpeek">@markpeek</a>)</h3>
      <p>
        In addition to Packer, Mark Peek helps maintain
        various open source projects such as
        <a href="https://github.com/cloudtools">cloudtools</a> and
        <a href="https://github.com/ironport">IronPort Python libraries</a>.
        Mark is also a <a href="https://FreeBSD.org">FreeBSD committer</a>.
      </p>
    </div>
  </div>
  <div class="person">
    <img class="pull-left" src="http://www.gravatar.com/avatar/1fca64df3d7db1e2f258a8956d2b0aff.png?s=125">
    <div class="bio">
      <h3>Ross Smith II (<a href="https://github.com/rasa" target="_blank">@rasa</a>)</h3>
      <p>
        <a href="http://smithii.com/" target="_blank">Ross Smith</a> maintains our
        VMware builder on Windows, and provides other valuable assistance. Ross is an
        open source enthusiast, published author, and freelance consultant.
      </p>
    </div>
  </div>
  <div class="person">
    <img class="pull-left" src="http://www.gravatar.com/avatar/c9f6bf7b5b865012be5eded656ebed7d.png?s=125">
    <div class="bio">
      <h3>Rickard von Essen<br/>(<a href="https://github.com/rickard-von-essen" target="_blank">@rickard-von-essen</a>)</h3>
      <p>
        Rickard von Essen maintains our Parallels Desktop builder. Rickard is a
        polyglot programmer and consults on Continuous Delivery.
      </p>
    </div>
  </div>
  <div class="clearfix"></div>
</div>
---
description: |
    There are a handful of terms used throughout the Packer documentation where the
    meaning may not be immediately obvious if you haven't used Packer before.
    Luckily, there are relatively few. This page documents all the terminology
    required to understand and use Packer. The terminology is in alphabetical order
    for easy referencing.
layout: docs
page_title: Packer Terminology
...
# Packer Terminology

There are a handful of terms used throughout the Packer documentation where the
meaning may not be immediately obvious if you haven't used Packer before.
Luckily, there are relatively few. This page documents all the terminology
required to understand and use Packer. The terminology is in alphabetical order
for easy referencing.
- `Artifacts` are the results of a single build, and are usually a set of IDs
  or files to represent a machine image. Every builder produces a single
  artifact. As an example, in the case of the Amazon EC2 builder, the artifact
  is a set of AMI IDs (one per region). For the VMware builder, the artifact is
  a directory of files comprising the created virtual machine.

- `Builds` are a single task that eventually produces an image for a single
  platform. Multiple builds run in parallel. Example usage in a sentence: "The
  Packer build produced an AMI to run our web application." Or: "Packer is
  running the builds now for VMware, AWS, and VirtualBox."

- `Builders` are components of Packer that are able to create a machine image
  for a single platform. Builders read in some configuration and use that to
  run and generate a machine image. A builder is invoked as part of a build in
  order to create the actual resulting images. Example builders include
  VirtualBox, VMware, and Amazon EC2. Builders can be created and added to
  Packer in the form of plugins.

- `Commands` are sub-commands for the `packer` program that perform some job.
  An example command is "build", which is invoked as `packer build`. Packer
  ships with a set of commands out of the box in order to define its
  command-line interface. Commands can also be created and added to Packer in
  the form of plugins.

- `Post-processors` are components of Packer that take the result of a builder
  or another post-processor and process that to create a new artifact.
  Examples of post-processors are compress to compress artifacts, upload to
  upload artifacts, etc.

- `Provisioners` are components of Packer that install and configure software
  within a running machine prior to that machine being turned into a static
  image. They perform the major work of making the image contain useful
  software. Example provisioners include shell scripts, Chef, Puppet, etc.

- `Templates` are JSON files which define one or more builds by configuring
  the various components of Packer. Packer is able to read a template and use
  that information to create multiple machine images in parallel. A minimal
  example template tying these terms together is sketched after this list.
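
To show how these terms fit together, here is a minimal, hypothetical template
sketch: one builder plus one provisioner, together defining a single build
whose artifact is an AMI. The credentials, AMI ID, and instance settings are
illustrative placeholders, not a working configuration.

``` {.javascript}
{
  "builders": [
    {
      "type": "amazon-ebs",
      "access_key": "YOUR KEY HERE",
      "secret_key": "YOUR SECRET KEY HERE",
      "region": "us-east-1",
      "source_ami": "ami-xxxxxxxx",
      "instance_type": "t2.micro",
      "ssh_username": "ubuntu",
      "ami_name": "packer-example {{timestamp}}"
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "inline": ["sudo apt-get update"]
    }
  ]
}
```

Running `packer build` against a template like this would produce one build
and one artifact (the resulting AMI ID).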
---
description: |
    The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an
    EBS volume as the root device. For more information on the difference between
    instance storage and EBS-backed instances, see the storage for the root device
    section in the EC2 documentation.
layout: docs
page_title: 'Amazon AMI Builder (chroot)'
...
# AMI Builder (chroot)

Type: `amazon-chroot`

The `amazon-chroot` Packer builder is able to create Amazon AMIs backed by an
EBS volume as the root device. For more information on the difference between
instance storage and EBS-backed instances, see the ["storage for the root
device" section in the EC2
documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).

The difference between this builder and the `amazon-ebs` builder is that this
builder is able to build an EBS-backed AMI without launching a new EC2
instance. This can dramatically speed up AMI builds for organizations who need
the extra fast build.

~> **This is an advanced builder** If you're just getting started with Packer,
we recommend starting with the
[amazon-ebs builder](/docs/builders/amazon-ebs.html), which is much easier to
use.

The builder does *not* manage AMIs. Once it creates an AMI and stores it in
your account, it is up to you to use, delete, etc. the AMI.

## How Does it Work?

This builder works by creating a new EBS volume from an existing source AMI and
attaching it into an already-running EC2 instance. Once attached, a
[chroot](http://en.wikipedia.org/wiki/Chroot) is used to provision the system
within that volume. After provisioning, the volume is detached, snapshotted,
and an AMI is made.

Using this process, minutes can be shaved off the AMI creation process because
a new EC2 instance doesn't need to be launched.

There are some restrictions, however. The host EC2 instance that the volume is
attached to must be a similar system (generally the same OS version, kernel
versions, etc.) as the AMI being built. Additionally, this process is much more
expensive because the EC2 instance must be kept running persistently in order
to build AMIs, whereas the other AMI builders start instances on-demand to
build AMIs as needed.
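
Before diving into the full option list, here is a minimal sketch of an
`amazon-chroot` builder stanza using only the required keys; the credentials
and source AMI are placeholders, and a complete example appears under "Basic
Example" below.

``` {.javascript}
{
  "type": "amazon-chroot",
  "access_key": "YOUR KEY HERE",
  "secret_key": "YOUR SECRET KEY HERE",
  "source_ami": "ami-xxxxxxxx",
  "ami_name": "packer-amazon-chroot {{timestamp}}"
}
```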
## Configuration Reference

@@ -52,98 +55,101 @@ segmented below into two categories: required and optional parameters. Within
each category, the available configuration keys are alphabetized.

In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) can be configured for this
builder.

### Required:

- `access_key` (string) - The access key used to communicate with AWS. [Learn
  how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)

- `ami_name` (string) - The name of the resulting AMI that will appear when
  managing AMIs in the AWS console or via APIs. This must be unique. To help
  make this unique, use a function like `timestamp` (see [configuration
  templates](/docs/templates/configuration-templates.html) for more info)

- `secret_key` (string) - The secret key used to communicate with AWS. [Learn
  how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)

- `source_ami` (string) - The source AMI whose root volume will be copied and
  provisioned on the currently running instance. This must be an EBS-backed
  AMI with a root volume snapshot that you have access to.

### Optional:
- `ami_description` (string) - The description to set for the resulting
  AMI(s). By default this description is empty.

- `ami_groups` (array of strings) - A list of groups that have access to
  launch the resulting AMI(s). By default no groups have permission to launch
  the AMI. `all` will make the AMI publicly accessible.

- `ami_product_codes` (array of strings) - A list of product codes to
  associate with the AMI. By default no product codes are associated with
  the AMI.

- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
  Tags and attributes are copied along with the AMI. AMI copying takes time
  depending on the size of the AMI, but will generally take many minutes.

- `ami_users` (array of strings) - A list of account IDs that have access to
  launch the resulting AMI(s). By default no additional users other than the
  user creating the AMI have permission to launch it.

- `ami_virtualization_type` (string) - The type of virtualization for the AMI
  you are building. This option is required to register HVM images. Can be
  "paravirtual" (default) or "hvm".

- `chroot_mounts` (array of array of strings) - This is a list of additional
  devices to mount into the chroot environment. This configuration parameter
  requires some additional documentation which is in the "Chroot Mounts"
  section below. Please read that section for more information on how to
  use this.

- `command_wrapper` (string) - How to run shell commands. This defaults
  to "{{.Command}}". This may be useful to set if you want to set
  environmental variables or perhaps run it with `sudo` or so on. This is a
  configuration template where the `.Command` variable is replaced with the
  command to be run (see the sketch after this list).

- `copy_files` (array of strings) - Paths to files on the running EC2 instance
  that will be copied into the chroot environment prior to provisioning. This
  is useful, for example, to copy `/etc/resolv.conf` so that DNS lookups work.

- `device_path` (string) - The path to the device where the root volume of the
  source AMI will be attached. This defaults to "" (empty string), which
  forces Packer to find an open device automatically.

- `enhanced_networking` (boolean) - Enable enhanced networking
  (SriovNetSupport) on HVM-compatible AMIs. If true, add
  `ec2:ModifyInstanceAttribute` to your AWS IAM policy.

- `force_deregister` (boolean) - Force Packer to first deregister an existing
  AMI if one with the same name already exists. Default `false`.

- `mount_path` (string) - The path where the volume will be mounted. This is
  where the chroot environment will be. This defaults to
  `packer-amazon-chroot-volumes/{{.Device}}`. This is a configuration template
  where the `.Device` variable is replaced with the name of the device where
  the volume is attached.

- `mount_options` (array of strings) - Options to supply the `mount` command
  when mounting devices. Each option will be prefixed with `-o` and supplied
  to the `mount` command run by Packer. Because this command is run in a
  shell, user discretion is advised. See [this manual page for the mount
  command](http://linuxcommand.org/man_pages/mount8.html) for valid file
  system specific options.

- `root_volume_size` (integer) - The size of the root volume for the chroot
  environment, and the resulting AMI.

- `tags` (object of key/value strings) - Tags applied to the AMI.
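
As a sketch of how a few of these optional keys combine, the following
hypothetical builder stanza wraps every command in `sudo`, copies
`/etc/resolv.conf` into the chroot, and sets an explicit mount path. The
credentials, source AMI, and paths are illustrative only.

``` {.javascript}
{
  "type": "amazon-chroot",
  "access_key": "YOUR KEY HERE",
  "secret_key": "YOUR SECRET KEY HERE",
  "source_ami": "ami-xxxxxxxx",
  "ami_name": "packer-amazon-chroot {{timestamp}}",
  "command_wrapper": "sudo {{.Command}}",
  "copy_files": ["/etc/resolv.conf"],
  "mount_path": "/mnt/packer-amazon-chroot-volumes/{{.Device}}"
}
```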
## Basic Example

Here is a basic example. It is completely valid except for the access keys:

``` {.javascript}
{
  "type": "amazon-chroot",
  "access_key": "YOUR KEY HERE",
@@ -155,21 +161,21 @@ Here is a basic example. It is completely valid except for the access keys:
## Chroot Mounts

The `chroot_mounts` configuration can be used to mount additional devices
within the chroot. By default, the following additional mounts are added into
the chroot by Packer:

- `/proc` (proc)
- `/sys` (sysfs)
- `/dev` (bind to real `/dev`)
- `/dev/pts` (devpts)
- `/proc/sys/fs/binfmt_misc` (binfmt_misc)

These default mounts are usually good enough for anyone and are sane defaults.
However, if you want to change or add the mount points, you may do so using
the `chroot_mounts` configuration. Here is an example configuration:
``` {.javascript}
{
  "chroot_mounts": [
    ["proc", "proc", "/proc"],
@@ -178,25 +184,25 @@ using the `chroot_mounts` configuration. Here is an example configuration:
}
```
`chroot_mounts` is a list of 3-tuples of strings; a bind-mount example is
sketched after this list. The three components of the 3-tuple, in order, are:

- The filesystem type. If this is "bind", then Packer will properly bind the
  filesystem to another mount point.

- The source device.

- The mount directory.
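
For instance, a hypothetical configuration adding a bind mount alongside the
standard `proc` mount might look like the following; the paths are
illustrative only.

``` {.javascript}
{
  "chroot_mounts": [
    ["proc", "proc", "/proc"],
    ["bind", "/var/cache/apt", "/var/cache/apt"]
  ]
}
```

Here the first element names the filesystem type (`bind`), the second the
source device or host path, and the third the mount directory inside the
chroot.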
## Parallelism

A quick note on parallelism: it is perfectly safe to run multiple *separate*
Packer processes with the `amazon-chroot` builder on the same EC2 instance. In
fact, this is recommended as a way to push the most performance out of your
AMI builds.

Packer properly obtains a process lock for the parallelism-sensitive parts of
its internals such as finding an available device.

## Gotchas

@@ -204,10 +210,12 @@ One of the difficulties with using the chroot builder is that your provisioning
scripts must not leave any processes running or packer will be unable to unmount
the filesystem.
For Debian-based distributions you can set up a
[policy-rc.d](http://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt)
file which will prevent packages installed by your provisioners from starting
services:
``` {.javascript}
{
  "type": "shell",
  "inline": [
@@ -226,6 +234,3 @@ prevent packages installed by your provisioners from starting services:
  ]
}
```
---
description: |
    The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS
    volumes for use in EC2. For more information on the difference between
    EBS-backed instances and instance-store backed instances, see the storage for
    the root device section in the EC2 documentation.
layout: docs
page_title: 'Amazon AMI Builder (EBS backed)'
...
# AMI Builder (EBS backed)

Type: `amazon-ebs`

The `amazon-ebs` Packer builder is able to create Amazon AMIs backed by EBS
volumes for use in [EC2](http://aws.amazon.com/ec2/). For more information on
the difference between EBS-backed instances and instance-store backed
instances, see the ["storage for the root device" section in the EC2
documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).

This builder builds an AMI by launching an EC2 instance from a source AMI,
provisioning that running machine, and then creating an AMI from that machine.
This is all done in your own AWS account. The builder will create temporary
keypairs, security group rules, etc. that provide it temporary access to the
instance while the image is being created. This simplifies configuration quite
a bit.

The builder does *not* manage AMIs. Once it creates an AMI and stores it in
your account, it is up to you to use, delete, etc. the AMI.
## Configuration Reference

@@ -32,166 +35,169 @@ segmented below into two categories: required and optional parameters. Within
each category, the available configuration keys are alphabetized.

In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) can be configured for this
builder.

### Required:
- `access_key` (string) - The access key used to communicate with AWS. [Learn
  how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)

- `ami_name` (string) - The name of the resulting AMI that will appear when
  managing AMIs in the AWS console or via APIs. This must be unique. To help
  make this unique, use a function like `timestamp` (see [configuration
  templates](/docs/templates/configuration-templates.html) for more info)

- `instance_type` (string) - The EC2 instance type to use while building the
  AMI, such as "m1.small".

- `region` (string) - The name of the region, such as "us-east-1", in which to
  launch the EC2 instance to create the AMI.

- `secret_key` (string) - The secret key used to communicate with AWS. [Learn
  how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)

- `source_ami` (string) - The initial AMI used as a base for the newly
  created machine.

- `ssh_username` (string) - The username to use in order to communicate over
  SSH to the running machine. A template using only these required keys is
  sketched after this list.
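
Here is a minimal sketch built from just the required keys above; the
credentials, region, source AMI, and username are placeholders (compare the
fuller "Basic Example" further down).

``` {.javascript}
{
  "type": "amazon-ebs",
  "access_key": "YOUR KEY HERE",
  "secret_key": "YOUR SECRET KEY HERE",
  "region": "us-east-1",
  "source_ami": "ami-xxxxxxxx",
  "instance_type": "m1.small",
  "ssh_username": "ubuntu",
  "ami_name": "packer-ebs-example {{timestamp}}"
}
```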
### Optional:

- `ami_block_device_mappings` (array of block device mappings) - Add the block
  device mappings to the AMI. The block device mappings allow for keys:

  - `device_name` (string) - The device name exposed to the instance (for
    example, "/dev/sdh" or "xvdh")
  - `virtual_name` (string) - The virtual device name. See the documentation
    on [Block Device
    Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
    for more information
  - `snapshot_id` (string) - The ID of the snapshot
  - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD)
    volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic
    volumes
  - `volume_size` (integer) - The size of the volume, in GiB. Required if not
    specifying a `snapshot_id`
  - `delete_on_termination` (boolean) - Indicates whether the EBS volume is
    deleted on instance termination
  - `encrypted` (boolean) - Indicates whether to encrypt the volume or not
  - `no_device` (boolean) - Suppresses the specified device included in the
    block device mapping of the AMI
  - `iops` (integer) - The number of I/O operations per second (IOPS) that the
    volume supports. See the documentation on
    [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
    for more information

- `ami_description` (string) - The description to set for the resulting
  AMI(s). By default this description is empty.

- `ami_groups` (array of strings) - A list of groups that have access to
  launch the resulting AMI(s). By default no groups have permission to launch
  the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't
  accept any value other than "all".

- `ami_product_codes` (array of strings) - A list of product codes to
  associate with the AMI. By default no product codes are associated with
  the AMI.

- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
  Tags and attributes are copied along with the AMI. AMI copying takes time
  depending on the size of the AMI, but will generally take many minutes.

- `ami_users` (array of strings) - A list of account IDs that have access to
  launch the resulting AMI(s). By default no additional users other than the
  user creating the AMI have permission to launch it.

- `associate_public_ip_address` (boolean) - If using a non-default VPC, public
  IP addresses are not provided by default. If this is toggled, your new
  instance will get a Public IP.

- `availability_zone` (string) - Destination availability zone to launch
  instance in. Leave this empty to allow Amazon to auto-assign.

- `enhanced_networking` (boolean) - Enable enhanced networking
  (SriovNetSupport) on HVM-compatible AMIs. If true, add
  `ec2:ModifyInstanceAttribute` to your AWS IAM policy.

- `force_deregister` (boolean) - Force Packer to first deregister an existing
  AMI if one with the same name already exists. Default `false`.

- `iam_instance_profile` (string) - The name of an [IAM instance
  profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
  to launch the EC2 instance with.

- `launch_block_device_mappings` (array of block device mappings) - Add the
  block device mappings to the launch instance. The block device mappings are
  the same as `ami_block_device_mappings` above.

- `run_tags` (object of key/value strings) - Tags to apply to the instance
  that is *launched* to create the AMI. These tags are *not* applied to the
  resulting AMI unless they're duplicated in `tags` (see the sketch after
  this list).

- `security_group_id` (string) - The ID (*not* the name) of the security group
  to assign to the instance. By default this is not set and Packer will
  automatically create a new temporary security group to allow SSH access.
  Note that if this is specified, you must be sure the security group allows
  access to the `ssh_port` given below.

- `security_group_ids` (array of strings) - A list of security groups as
  described above. Note that if this is specified, you must omit the
  `security_group_id`.

- `spot_price` (string) - The maximum hourly price to pay for a spot instance
  to create the AMI. Spot instances are a type of instance that EC2 starts
  when the current spot price is less than the maximum price you specify. Spot
  price will be updated based on available spot instance capacity and current
  spot instance requests. It may save you some costs. You can set this to
  "auto" for Packer to automatically discover the best spot price.

- `spot_price_auto_product` (string) - Required if `spot_price` is set to
  "auto". This tells Packer what sort of AMI you're launching to find the best
  spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
  `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`

- `ssh_keypair_name` (string) - If specified, this is the key that will be
  used for SSH with the machine. By default, this is blank, and Packer will
  generate a temporary keypair. `ssh_private_key_file` must be specified
  with this.

- `ssh_private_ip` (boolean) - If true, then SSH will always use the private
  IP if available.

- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
  "subnet-12345def", where Packer will launch the EC2 instance. This field is
  required if you are using a non-default VPC.

- `tags` (object of key/value strings) - Tags applied to the AMI and
  relevant snapshots.

- `temporary_key_pair_name` (string) - The name of the temporary keypair
  to generate. By default, Packer generates a name with a UUID.

- `token` (string) - The access token to use. This is different from the
  access key and secret key. If you're not sure what this is, then you
  probably don't need it. This will also be read from the `AWS_SECURITY_TOKEN`
  environmental variable.

- `user_data` (string) - User data to apply when launching the instance. Note
  that you need to be careful about escaping characters due to the templates
  being JSON. It is often more convenient to use `user_data_file` instead.

- `user_data_file` (string) - Path to a file that will be used for the user
  data when launching the instance.

- `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
  in order to create a temporary security group within the VPC.

- `windows_password_timeout` (string) - The timeout for waiting for a Windows
  password for Windows instances. Defaults to 20 minutes. Example value: "10m"
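
As a sketch of how a few of these optional keys combine, the following
hypothetical fragment (required keys omitted for brevity) requests a spot
instance, tags the temporary build instance via `run_tags`, and tags the
resulting AMI via `tags`; the product, tag names, and values are illustrative.

``` {.javascript}
{
  "type": "amazon-ebs",
  "spot_price": "auto",
  "spot_price_auto_product": "Linux/UNIX",
  "run_tags": {
    "Purpose": "packer-build"
  },
  "tags": {
    "OS_Version": "Ubuntu",
    "Release": "Latest"
  }
}
```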
## Basic Example

Here is a basic example. It is completely valid except for the access keys:

``` {.javascript}
{
  "type": "amazon-ebs",
  "access_key": "YOUR KEY HERE",
@@ -204,25 +210,23 @@ Here is a basic example. It is completely valid except for the access keys:
}
```
-> **Note:** Packer can also read the access key and secret access key from
environmental variables. See the configuration reference in the section above
for more information on what environmental variables Packer will look for.

## Accessing the Instance to Debug

If you need to access the instance to debug for some reason, run the builder
with the `-debug` flag. In debug mode, the Amazon builder will save the private
key in the current directory and will output the DNS or IP information as
well. You can use this information to access the instance as it is running.
## AMI Block Device Mappings Example

Here is an example using the optional AMI block device mappings. This will add
the /dev/sdb and /dev/sdc block device mappings to the finished AMI.

``` {.javascript}
{
  "type": "amazon-ebs",
  "access_key": "YOUR KEY HERE",
@@ -248,9 +252,9 @@ the /dev/sdb and /dev/sdc block device mappings to the finished AMI.
## Tag Example ## Tag Example
Here is an example using the optional AMI tags. This will add the tags Here is an example using the optional AMI tags. This will add the tags
"OS_Version" and "Release" to the finished AMI. "OS\_Version" and "Release" to the finished AMI.
```javascript ``` {.javascript}
{ {
"type": "amazon-ebs", "type": "amazon-ebs",
"access_key": "YOUR KEY HERE", "access_key": "YOUR KEY HERE",
@@ -267,13 +271,10 @@
} }
``` ```
-> **Note:** Packer uses pre-built AMIs as the source for building images. -> **Note:** Packer uses pre-built AMIs as the source for building images.
These source AMIs may include volumes that are not flagged to be destroyed on These source AMIs may include volumes that are not flagged to be destroyed on
termination of the instance building the new image. Packer will attempt to clean termination of the instance building the new image. Packer will attempt to clean
up all residual volumes that are not designated by the user to remain after up all residual volumes that are not designated by the user to remain after
termination. If you need to preserve those source volumes, you can override the termination. If you need to preserve those source volumes, you can override the
termination setting by specifying `delete_on_termination=false` in the termination setting by specifying `delete_on_termination=false` in the
`launch_block_device_mappings` block for the device. `launch_block_device_mappings` block for the device.
[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html
[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
--- ---
layout: "docs" description: |
page_title: "Amazon AMI Builder (instance-store)" The `amazon-instance` Packer builder is able to create Amazon AMIs backed by
description: |- instance storage as the root device. For more information on the difference
The `amazon-instance` Packer builder is able to create Amazon AMIs backed by instance storage as the root device. For more information on the difference between instance storage and EBS-backed instances, see the storage for the root device section in the EC2 documentation. between instance storage and EBS-backed instances, see the storage for the root
--- device section in the EC2 documentation.
layout: docs
page_title: 'Amazon AMI Builder (instance-store)'
...
# AMI Builder (instance-store) # AMI Builder (instance-store)
@@ -11,24 +14,24 @@
The `amazon-instance` Packer builder is able to create Amazon AMIs backed by The `amazon-instance` Packer builder is able to create Amazon AMIs backed by
instance storage as the root device. For more information on the difference instance storage as the root device. For more information on the difference
between instance storage and EBS-backed instances, see the between instance storage and EBS-backed instances, see the ["storage for the
["storage for the root device" section in the EC2 documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device). root device" section in the EC2
documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ComponentsAMIs.html#storage-for-the-root-device).
This builder builds an AMI by launching an EC2 instance from an existing This builder builds an AMI by launching an EC2 instance from an existing
instance-storage backed AMI, provisioning that running machine, and then instance-storage backed AMI, provisioning that running machine, and then
bundling and creating a new AMI from that machine. bundling and creating a new AMI from that machine. This is all done in your own
This is all done in your own AWS account. The builder will create temporary AWS account. The builder will create temporary keypairs, security group rules,
keypairs, security group rules, etc. that provide it temporary access to etc. that provide it temporary access to the instance while the image is being
the instance while the image is being created. This simplifies configuration created. This simplifies configuration quite a bit.
quite a bit.
The builder does _not_ manage AMIs. Once it creates an AMI and stores it The builder does *not* manage AMIs. Once it creates an AMI and stores it in your
in your account, it is up to you to use, delete, etc. the AMI. account, it is up to you to use, delete, etc. the AMI.
-> **Note** This builder requires that the -> **Note** This builder requires that the [Amazon EC2 AMI
[Amazon EC2 AMI Tools](http://aws.amazon.com/developertools/368) Tools](http://aws.amazon.com/developertools/368) are installed onto the machine.
are installed onto the machine. This can be done within a provisioner, but This can be done within a provisioner, but must be done before the builder
must be done before the builder finishes running. finishes running.
## Configuration Reference ## Configuration Reference
@@ -37,200 +40,204 @@
each category, the available configuration keys are alphabetized. each category, the available configuration keys are alphabetized.
In addition to the options listed here, a In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) [communicator](/docs/templates/communicator.html) can be configured for this
can be configured for this builder. builder.
### Required: ### Required:
* `access_key` (string) - The access key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) - `access_key` (string) - The access key used to communicate with AWS. [Learn
how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)
* `account_id` (string) - Your AWS account ID. This is required for bundling - `account_id` (string) - Your AWS account ID. This is required for bundling
the AMI. This is _not the same_ as the access key. You can find your the AMI. This is *not the same* as the access key. You can find your account
account ID in the security credentials page of your AWS account. ID in the security credentials page of your AWS account.
* `ami_name` (string) - The name of the resulting AMI that will appear - `ami_name` (string) - The name of the resulting AMI that will appear when
when managing AMIs in the AWS console or via APIs. This must be unique. managing AMIs in the AWS console or via APIs. This must be unique. To help
To help make this unique, use a function like `timestamp` (see make this unique, use a function like `timestamp` (see [configuration
[configuration templates](/docs/templates/configuration-templates.html) for more info) templates](/docs/templates/configuration-templates.html) for more info)
* `instance_type` (string) - The EC2 instance type to use while building - `instance_type` (string) - The EC2 instance type to use while building the
the AMI, such as "m1.small". AMI, such as "m1.small".
* `region` (string) - The name of the region, such as "us-east-1", in which - `region` (string) - The name of the region, such as "us-east-1", in which to
to launch the EC2 instance to create the AMI. launch the EC2 instance to create the AMI.
* `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. - `s3_bucket` (string) - The name of the S3 bucket to upload the AMI. This
This bucket will be created if it doesn't exist. bucket will be created if it doesn't exist.
* `secret_key` (string) - The secret key used to communicate with AWS. [Learn how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials) - `secret_key` (string) - The secret key used to communicate with AWS. [Learn
how to set this.](/docs/builders/amazon.html#specifying-amazon-credentials)
* `source_ami` (string) - The initial AMI used as a base for the newly - `source_ami` (string) - The initial AMI used as a base for the newly
created machine. created machine.
* `ssh_username` (string) - The username to use in order to communicate - `ssh_username` (string) - The username to use in order to communicate over
over SSH to the running machine. SSH to the running machine.
* `x509_cert_path` (string) - The local path to a valid X509 certificate for - `x509_cert_path` (string) - The local path to a valid X509 certificate for
your AWS account. This is used for bundling the AMI. This X509 certificate your AWS account. This is used for bundling the AMI. This X509 certificate
must be registered with your account from the security credentials page must be registered with your account from the security credentials page in
in the AWS console. the AWS console.
* `x509_key_path` (string) - The local path to the private key for the X509 - `x509_key_path` (string) - The local path to the private key for the X509
certificate specified by `x509_cert_path`. This is used for bundling the AMI. certificate specified by `x509_cert_path`. This is used for bundling
the AMI.
### Optional: ### Optional:
* `ami_block_device_mappings` (array of block device mappings) - Add the block - `ami_block_device_mappings` (array of block device mappings) - Add the block
device mappings to the AMI. The block device mappings allow for keys: device mappings to the AMI. The block device mappings allow for keys:
- `device_name` (string) - The device name exposed to the instance (for - `device_name` (string) - The device name exposed to the instance (for
example, "/dev/sdh" or "xvdh") example, "/dev/sdh" or "xvdh")
- `virtual_name` (string) - The virtual device name. See the documentation on - `virtual_name` (string) - The virtual device name. See the documentation on
[Block Device Mapping][1] for more information [Block Device
- `snapshot_id` (string) - The ID of the snapshot Mapping](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html)
- `volume_type` (string) - The volume type. gp2 for General Purpose (SSD) for more information
volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic - `snapshot_id` (string) - The ID of the snapshot
volumes - `volume_type` (string) - The volume type. gp2 for General Purpose (SSD)
- `volume_size` (integer) - The size of the volume, in GiB. Required if not volumes, io1 for Provisioned IOPS (SSD) volumes, and standard for Magnetic
specifying a `snapshot_id` volumes
- `delete_on_termination` (boolean) - Indicates whether the EBS volume is - `volume_size` (integer) - The size of the volume, in GiB. Required if not
deleted on instance termination specifying a `snapshot_id`
- `encrypted` (boolean) - Indicates whether to encrypt the volume or not - `delete_on_termination` (boolean) - Indicates whether the EBS volume is
- `no_device` (boolean) - Suppresses the specified device included in the deleted on instance termination
block device mapping of the AMI - `encrypted` (boolean) - Indicates whether to encrypt the volume or not
- `iops` (integer) - The number of I/O operations per second (IOPS) that the - `no_device` (boolean) - Suppresses the specified device included in the
volume supports. See the documentation on [IOPs][2] for more information block device mapping of the AMI
- `iops` (integer) - The number of I/O operations per second (IOPS) that the
* `ami_description` (string) - The description to set for the resulting volume supports. See the documentation on
AMI(s). By default this description is empty. [IOPs](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html)
for more information
* `ami_groups` (array of strings) - A list of groups that have access - `ami_description` (string) - The description to set for the
to launch the resulting AMI(s). By default no groups have permission resulting AMI(s). By default this description is empty.
to launch the AMI. `all` will make the AMI publicly accessible.
AWS currently doesn't accept any value other than "all". - `ami_groups` (array of strings) - A list of groups that have access to
launch the resulting AMI(s). By default no groups have permission to launch
* `ami_product_codes` (array of strings) - A list of product codes to the AMI. `all` will make the AMI publicly accessible. AWS currently doesn't
associate with the AMI. By default no product codes are associated with accept any value other than "all".
the AMI.
- `ami_product_codes` (array of strings) - A list of product codes to
* `ami_regions` (array of strings) - A list of regions to copy the AMI to. associate with the AMI. By default no product codes are associated with
Tags and attributes are copied along with the AMI. AMI copying takes time the AMI.
depending on the size of the AMI, but will generally take many minutes.
- `ami_regions` (array of strings) - A list of regions to copy the AMI to.
* `ami_users` (array of strings) - A list of account IDs that have access Tags and attributes are copied along with the AMI. AMI copying takes time
to launch the resulting AMI(s). By default no additional users other than the user depending on the size of the AMI, but will generally take many minutes.
creating the AMI have permission to launch it.
- `ami_users` (array of strings) - A list of account IDs that have access to
* `ami_virtualization_type` (string) - The type of virtualization for the AMI launch the resulting AMI(s). By default no additional users other than the
you are building. This option is required to register HVM images. Can be user creating the AMI have permission to launch it.
"paravirtual" (default) or "hvm".
- `ami_virtualization_type` (string) - The type of virtualization for the AMI
* `associate_public_ip_address` (boolean) - If using a non-default VPC, public you are building. This option is required to register HVM images. Can be
IP addresses are not provided by default. If this is toggled, your new "paravirtual" (default) or "hvm".
instance will get a Public IP.
- `associate_public_ip_address` (boolean) - If using a non-default VPC, public
* `availability_zone` (string) - Destination availability zone to launch instance in. IP addresses are not provided by default. If this is toggled, your new
Leave this empty to allow Amazon to auto-assign. instance will get a Public IP.
* `bundle_destination` (string) - The directory on the running instance - `availability_zone` (string) - Destination availability zone to launch
where the bundled AMI will be saved prior to uploading. By default this is instance in. Leave this empty to allow Amazon to auto-assign.
"/tmp". This directory must exist and be writable.
- `bundle_destination` (string) - The directory on the running instance where
* `bundle_prefix` (string) - The prefix for files created from bundling the bundled AMI will be saved prior to uploading. By default this is "/tmp".
the root volume. By default this is "image-{{timestamp}}". The `timestamp` This directory must exist and be writable.
variable should be used to make sure this is unique, otherwise it can
collide with other created AMIs by Packer in your account. - `bundle_prefix` (string) - The prefix for files created from bundling the
root volume. By default this is "image-{{timestamp}}". The `timestamp`
* `bundle_upload_command` (string) - The command to use to upload the variable should be used to make sure this is unique, otherwise it can
bundled volume. See the "custom bundle commands" section below for more collide with other created AMIs by Packer in your account.
information.
- `bundle_upload_command` (string) - The command to use to upload the
* `bundle_vol_command` (string) - The command to use to bundle the volume. bundled volume. See the "custom bundle commands" section below for
See the "custom bundle commands" section below for more information. more information.
* `enhanced_networking` (boolean) - Enable enhanced networking (SriovNetSupport) on - `bundle_vol_command` (string) - The command to use to bundle the volume. See
HVM-compatible AMIs. If true, add `ec2:ModifyInstanceAttribute` to your AWS IAM policy. the "custom bundle commands" section below for more information.
* `force_deregister` (boolean) - Force Packer to first deregister an existing - `enhanced_networking` (boolean) - Enable enhanced
AMI if one with the same name already exists. Default `false`. networking (SriovNetSupport) on HVM-compatible AMIs. If true, add
`ec2:ModifyInstanceAttribute` to your AWS IAM policy.
* `iam_instance_profile` (string) - The name of an
[IAM instance profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html) - `force_deregister` (boolean) - Force Packer to first deregister an existing
to launch the EC2 instance with. AMI if one with the same name already exists. Default `false`.
* `launch_block_device_mappings` (array of block device mappings) - Add the - `iam_instance_profile` (string) - The name of an [IAM instance
block device mappings to the launch instance. The block device mappings are profile](http://docs.aws.amazon.com/IAM/latest/UserGuide/instance-profiles.html)
the same as `ami_block_device_mappings` above. to launch the EC2 instance with.
* `run_tags` (object of key/value strings) - Tags to apply to the instance - `launch_block_device_mappings` (array of block device mappings) - Add the
that is _launched_ to create the AMI. These tags are _not_ applied to block device mappings to the launch instance. The block device mappings are
the resulting AMI unless they're duplicated in `tags`. the same as `ami_block_device_mappings` above.
* `security_group_id` (string) - The ID (_not_ the name) of the security - `run_tags` (object of key/value strings) - Tags to apply to the instance
group to assign to the instance. By default this is not set and Packer that is *launched* to create the AMI. These tags are *not* applied to the
will automatically create a new temporary security group to allow SSH resulting AMI unless they're duplicated in `tags`.
access. Note that if this is specified, you must be sure the security
group allows access to the `ssh_port` given below. - `security_group_id` (string) - The ID (*not* the name) of the security group
to assign to the instance. By default this is not set and Packer will
* `security_group_ids` (array of strings) - A list of security groups as automatically create a new temporary security group to allow SSH access.
described above. Note that if this is specified, you must omit the Note that if this is specified, you must be sure the security group allows
`security_group_id`. access to the `ssh_port` given below.
* `spot_price` (string) - The maximum hourly price to launch a spot instance - `security_group_ids` (array of strings) - A list of security groups as
to create the AMI. Spot instances are started by EC2 when the maximum described above. Note that if this is specified, you must omit the
price that you specify exceeds the current spot price. Spot price will be `security_group_id`.
updated based on available spot instance capacity and current spot Instance
requests. It may save you some costs. You can set this to "auto" for - `spot_price` (string) - The maximum hourly price to launch a spot instance
Packer to automatically discover the best spot price. to create the AMI. Spot instances are started by EC2 when the
maximum price that you specify exceeds the current spot price. Spot price
* `spot_price_auto_product` (string) - Required if `spot_price` is set to will be updated based on available spot instance capacity and current spot
"auto". This tells Packer what sort of AMI you're launching to find the best Instance requests. It may save you some costs. You can set this to "auto"
spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`, for Packer to automatically discover the best spot price.
`Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`
- `spot_price_auto_product` (string) - Required if `spot_price` is set
* `ssh_keypair_name` (string) - If specified, this is the key that will be to "auto". This tells Packer what sort of AMI you're launching to find the
used for SSH with the machine. By default, this is blank, and Packer will best spot price. This must be one of: `Linux/UNIX`, `SUSE Linux`, `Windows`,
generate a temporary keypair. `ssh_private_key_file` must be specified `Linux/UNIX (Amazon VPC)`, `SUSE Linux (Amazon VPC)`, `Windows (Amazon VPC)`
with this.
- `ssh_keypair_name` (string) - If specified, this is the key that will be
* `ssh_private_ip` (boolean) - If true, then SSH will always use the private used for SSH with the machine. By default, this is blank, and Packer will
IP if available. generate a temporary keypair. `ssh_private_key_file` must be specified
with this.
* `subnet_id` (string) - If using VPC, the ID of the subnet, such as
"subnet-12345def", where Packer will launch the EC2 instance. This field is - `ssh_private_ip` (boolean) - If true, then SSH will always use the private
required if you are using a non-default VPC. IP if available.
- `subnet_id` (string) - If using VPC, the ID of the subnet, such as
"subnet-12345def", where Packer will launch the EC2 instance. This field is
required if you are using a non-default VPC.
* `tags` (object of key/value strings) - Tags applied to the AMI. - `tags` (object of key/value strings) - Tags applied to the AMI.
* `temporary_key_pair_name` (string) - The name of the temporary keypair - `temporary_key_pair_name` (string) - The name of the temporary keypair
to generate. By default, Packer generates a name with a UUID. to generate. By default, Packer generates a name with a UUID.
* `user_data` (string) - User data to apply when launching the instance. - `user_data` (string) - User data to apply when launching the instance. Note
Note that you need to be careful about escaping characters due to the that you need to be careful about escaping characters due to the templates
templates being JSON. It is often more convenient to use `user_data_file`, being JSON. It is often more convenient to use `user_data_file`, instead.
instead.
* `user_data_file` (string) - Path to a file that will be used for the - `user_data_file` (string) - Path to a file that will be used for the user
user data when launching the instance. data when launching the instance.
* `vpc_id` (string) - If launching into a VPC subnet, Packer needs the - `vpc_id` (string) - If launching into a VPC subnet, Packer needs the VPC ID
VPC ID in order to create a temporary security group within the VPC. in order to create a temporary security group within the VPC.
* `x509_upload_path` (string) - The path on the remote machine where the - `x509_upload_path` (string) - The path on the remote machine where the X509
X509 certificate will be uploaded. This path must already exist and be certificate will be uploaded. This path must already exist and be writable.
writable. X509 certificates are uploaded after provisioning is run, so X509 certificates are uploaded after provisioning is run, so it is perfectly
it is perfectly okay to create this directory as part of the provisioning okay to create this directory as part of the provisioning process.
process.
* `windows_password_timeout` (string) - The timeout for waiting for - `windows_password_timeout` (string) - The timeout for waiting for a Windows
a Windows password for Windows instances. Defaults to 20 minutes. password for Windows instances. Defaults to 20 minutes. Example value: "10m"
Example value: "10m"
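If you would rather build on a spot instance, the `spot_price` and `spot_price_auto_product` options described above can be combined as sketched below. This is a fragment meant to be merged into a full builder definition, with illustrative values only:

``` {.javascript}
{
  "type": "amazon-instance",
  "spot_price": "auto",
  "spot_price_auto_product": "Linux/UNIX",
  "instance_type": "m1.small",
  "region": "us-east-1"
}
```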
## Basic Example ## Basic Example
Here is a basic example. It is completely valid except for the access keys: Here is a basic example. It is completely valid except for the access keys:
```javascript ``` {.javascript}
{ {
"type": "amazon-instance", "type": "amazon-instance",
"access_key": "YOUR KEY HERE", "access_key": "YOUR KEY HERE",
@@ -250,84 +257,79 @@
} }
``` ```
-> **Note:** Packer can also read the access key and secret -> **Note:** Packer can also read the access key and secret access key from
access key from environmental variables. See the configuration reference in environmental variables. See the configuration reference in the section above
the section above for more information on what environmental variables Packer for more information on what environmental variables Packer will look for.
will look for.
## Accessing the Instance to Debug ## Accessing the Instance to Debug
If you need to access the instance to debug for some reason, run the builder If you need to access the instance to debug for some reason, run the builder
with the `-debug` flag. In debug mode, the Amazon builder will save the with the `-debug` flag. In debug mode, the Amazon builder will save the private
private key in the current directory and will output the DNS or IP information key in the current directory and will output the DNS or IP information as well.
as well. You can use this information to access the instance as it is You can use this information to access the instance as it is running.
running.
## Custom Bundle Commands ## Custom Bundle Commands
A lot of the process required for creating an instance-store backed AMI A lot of the process required for creating an instance-store backed AMI involves
involves commands being run on the actual source instance. Specifically, the commands being run on the actual source instance. Specifically, the
`ec2-bundle-vol` and `ec2-upload-bundle` commands must be used to bundle `ec2-bundle-vol` and `ec2-upload-bundle` commands must be used to bundle the
the root filesystem and upload it, respectively. root filesystem and upload it, respectively.
Each of these commands has a lot of available flags. Instead of exposing each Each of these commands has a lot of available flags. Instead of exposing each
possible flag as a template configuration option, the instance-store AMI possible flag as a template configuration option, the instance-store AMI builder
builder for Packer lets you customize the entire command used to bundle for Packer lets you customize the entire command used to bundle and upload the
and upload the AMI. AMI.
These are configured with `bundle_vol_command` and `bundle_upload_command`. These are configured with `bundle_vol_command` and `bundle_upload_command`. Both
Both of these configurations are of these configurations are [configuration
[configuration templates](/docs/templates/configuration-templates.html) templates](/docs/templates/configuration-templates.html) and have support for
and have support for their own set of template variables. their own set of template variables.
### Bundle Volume Command ### Bundle Volume Command
The default value for `bundle_vol_command` is shown below. It is split The default value for `bundle_vol_command` is shown below. It is split across
across multiple lines for convenience of reading. The bundle volume command multiple lines for convenience of reading. The bundle volume command is
is responsible for executing `ec2-bundle-vol` in order to store an image responsible for executing `ec2-bundle-vol` in order to store an image of the
of the root filesystem to use to create the AMI. root filesystem to use to create the AMI.
```text ``` {.text}
sudo -i -n ec2-bundle-vol \ sudo -i -n ec2-bundle-vol \
-k {{.KeyPath}} \ -k {{.KeyPath}} \
-u {{.AccountId}} \ -u {{.AccountId}} \
-c {{.CertPath}} \ -c {{.CertPath}} \
-r {{.Architecture}} \ -r {{.Architecture}} \
-e {{.PrivatePath}}/* \ -e {{.PrivatePath}}/* \
-d {{.Destination}} \ -d {{.Destination}} \
-p {{.Prefix}} \ -p {{.Prefix}} \
--batch \ --batch \
--no-filter --no-filter
``` ```
The available template variables should be self-explanatory based on the The available template variables should be self-explanatory based on the
parameters they're used to satisfy the `ec2-bundle-vol` command. parameters they're used to satisfy the `ec2-bundle-vol` command.
~> **Warning!** Some versions of ec2-bundle-vol silently ignore all .pem and ~> **Warning!** Some versions of ec2-bundle-vol silently ignore all .pem and
.gpg files during the bundling of the AMI, which can cause problems on some .gpg files during the bundling of the AMI, which can cause problems on some
systems, such as Ubuntu. You may want to customize the bundle volume command systems, such as Ubuntu. You may want to customize the bundle volume command to
to include those files (see the `--no-filter` option of ec2-bundle-vol). include those files (see the `--no-filter` option of ec2-bundle-vol).
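To customize it, set `bundle_vol_command` to the full command as one string in your template. The sketch below simply restates the stock command from above with `--no-filter` kept so that .pem and .gpg files survive bundling; it is a fragment, not a complete builder definition:

``` {.javascript}
{
  "type": "amazon-instance",
  "bundle_vol_command": "sudo -i -n ec2-bundle-vol -k {{.KeyPath}} -u {{.AccountId}} -c {{.CertPath}} -r {{.Architecture}} -e {{.PrivatePath}}/* -d {{.Destination}} -p {{.Prefix}} --batch --no-filter"
}
```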
### Bundle Upload Command ### Bundle Upload Command
The default value for `bundle_upload_command` is shown below. It is split The default value for `bundle_upload_command` is shown below. It is split across
across multiple lines for convenience of reading. The bundle upload command multiple lines for convenience of reading. The bundle upload command is
is responsible for taking the bundled volume and uploading it to S3. responsible for taking the bundled volume and uploading it to S3.
```text ``` {.text}
sudo -i -n ec2-upload-bundle \ sudo -i -n ec2-upload-bundle \
-b {{.BucketName}} \ -b {{.BucketName}} \
-m {{.ManifestPath}} \ -m {{.ManifestPath}} \
-a {{.AccessKey}} \ -a {{.AccessKey}} \
-s {{.SecretKey}} \ -s {{.SecretKey}} \
-d {{.BundleDirectory}} \ -d {{.BundleDirectory}} \
--batch \ --batch \
--region {{.Region}} \ --region {{.Region}} \
--retry --retry
``` ```
The available template variables should be self-explanatory based on the The available template variables should be self-explanatory based on the
parameters they're used to satisfy the `ec2-upload-bundle` command. parameters they're used to satisfy the `ec2-upload-bundle` command.
[1]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_BlockDeviceMapping.html
[2]: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html
--- ---
layout: "docs" description: |
page_title: "Amazon AMI Builder" Packer is able to create Amazon AMIs. To achieve this, Packer comes with
description: |- multiple builders depending on the strategy you want to use to build the AMI.
Packer is able to create Amazon AMIs. To achieve this, Packer comes with multiple builders depending on the strategy you want to use to build the AMI. layout: docs
--- page_title: Amazon AMI Builder
...
# Amazon AMI Builder # Amazon AMI Builder
Packer is able to create Amazon AMIs. To achieve this, Packer comes with Packer is able to create Amazon AMIs. To achieve this, Packer comes with
multiple builders depending on the strategy you want to use to build the multiple builders depending on the strategy you want to use to build the AMI.
AMI. Packer supports the following builders at the moment: Packer supports the following builders at the moment:
- [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs by
launching a source AMI and re-packaging it into a new AMI
after provisioning. If in doubt, use this builder, which is the easiest to
get started with.
* [amazon-ebs](/docs/builders/amazon-ebs.html) - Create EBS-backed AMIs - [amazon-instance](/docs/builders/amazon-instance.html) - Create
by launching a source AMI and re-packaging it into a new AMI after instance-store AMIs by launching and provisioning a source instance, then
provisioning. If in doubt, use this builder, which is the easiest to get rebundling it and uploading it to S3.
started with.
* [amazon-instance](/docs/builders/amazon-instance.html) - Create - [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs
instance-store AMIs by launching and provisioning a source instance, then from an existing EC2 instance by mounting the root device and using a
rebundling it and uploading it to S3. [Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision
that device. This is an **advanced builder and should not be used by
newcomers**. However, it is also the fastest way to build an EBS-backed AMI
since no new EC2 instance needs to be launched.
* [amazon-chroot](/docs/builders/amazon-chroot.html) - Create EBS-backed AMIs -> **Don't know which builder to use?** If in doubt, use the [amazon-ebs
from an existing EC2 instance by mounting the root device and using a builder](/docs/builders/amazon-ebs.html). It is much easier to use and Amazon
[Chroot](http://en.wikipedia.org/wiki/Chroot) environment to provision generally recommends EBS-backed images nowadays.
that device. This is an **advanced builder and should not be used by
newcomers**. However, it is also the fastest way to build an EBS-backed
AMI since no new EC2 instance needs to be launched.
-> **Don't know which builder to use?** If in doubt, use the <span id="specifying-amazon-credentials"></span>
[amazon-ebs builder](/docs/builders/amazon-ebs.html). It is
much easier to use and Amazon generally recommends EBS-backed images nowadays.
<div id="specifying-amazon-credentials">## Specifying Amazon Credentials</div> ## Specifying Amazon Credentials
When you use any of the amazon builders, you must provide credentials to the API in the form of an access key id and secret. These look like: When you use any of the amazon builders, you must provide credentials to the API
in the form of an access key id and secret. These look like:
access key id: AKIAIOSFODNN7EXAMPLE access key id: AKIAIOSFODNN7EXAMPLE
secret access key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY secret access key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
If you use other AWS tools you may already have these configured. If so, packer will try to use them, *unless* they are specified in your packer template. Credentials are resolved in the following order: If you use other AWS tools you may already have these configured. If so, packer
will try to use them, *unless* they are specified in your packer template.
Credentials are resolved in the following order:
1. Values hard-coded in the packer template are always authoritative. 1. Values hard-coded in the packer template are always authoritative.
2. *Variables* in the packer template may be resolved from command-line flags or from environment variables. Please read about [User Variables](https://packer.io/docs/templates/user-variables.html) for details. 2. *Variables* in the packer template may be resolved from command-line flags
3. If no credentials are found, packer falls back to automatic lookup. or from environment variables. Please read about [User
Variables](https://packer.io/docs/templates/user-variables.html)
for details.
3. If no credentials are found, packer falls back to automatic lookup.
### Automatic Lookup ### Automatic Lookup
If no AWS credentials are found in a packer template, we proceed on to the following steps: If no AWS credentials are found in a packer template, we proceed on to the
following steps:
1. Lookup via environment variables.
- First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY`
- First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY`
2. Look for [local AWS configuration
files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files)
- First `~/.aws/credentials`
- Next based on `AWS_PROFILE`
1. Lookup via environment variables. 3. Lookup an IAM role for the current EC2 instance (if you're running in EC2)
- First `AWS_ACCESS_KEY_ID`, then `AWS_ACCESS_KEY`
- First `AWS_SECRET_ACCESS_KEY`, then `AWS_SECRET_KEY`
2. Look for [local AWS configuration files](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files)
- First `~/.aws/credentials`
- Next based on `AWS_PROFILE`
3. Lookup an IAM role for the current EC2 instance (if you're running in EC2)
~> **Subtle details of automatic lookup may change over time.** The most reliable way to specify your configuration is by setting them in template variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables. ~> **Subtle details of automatic lookup may change over time.** The most
reliable way to specify your configuration is by setting them in template
variables (directly or indirectly), or by using the `AWS_ACCESS_KEY_ID` and
`AWS_SECRET_ACCESS_KEY` environment variables.
Environment variables provide the best portability, allowing you to run your packer build on your workstation, in Atlas, or on another build server. Environment variables provide the best portability, allowing you to run your
packer build on your workstation, in Atlas, or on another build server.
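For example, you can pull the keys from the environment at build time instead of hard-coding them. The sketch below assumes only the documented `env` function for user variable defaults; the builder values are placeholders:

``` {.javascript}
{
  "variables": {
    "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
    "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}"
  },
  "builders": [{
    "type": "amazon-ebs",
    "access_key": "{{user `aws_access_key`}}",
    "secret_key": "{{user `aws_secret_key`}}",
    "region": "us-east-1",
    "source_ami": "ami-XXXXXXXX",
    "instance_type": "t1.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-env-example {{timestamp}}"
  }]
}
```

The same template then runs unchanged wherever those two environment variables are set.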
## Using an IAM Instance Profile ## Using an IAM Instance Profile
If AWS keys are not specified in the template, Packer will consult the [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files) file, try the standard AWS environment variables, and then If AWS keys are not specified in the template, in a
any IAM role credentials defined by the instance's metadata. [credentials](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-config-files)
file, or through environment variables, Packer will use credentials provided by
the instance's IAM profile, if it has one.
The following policy document provides the minimal set of permissions necessary for Packer to work: The following policy document provides the minimal set of permissions necessary for
Packer to work:
```javascript ``` {.javascript}
{ {
"Statement": [{ "Statement": [{
"Effect": "Allow", "Effect": "Allow",
@@ -104,13 +124,18 @@
### Attaching IAM Policies to Roles ### Attaching IAM Policies to Roles
IAM policies can be associated with users or roles. If you use packer with IAM IAM policies can be associated with users or roles. If you use packer with IAM
roles, you may encounter an error like this one:
==> amazon-ebs: Error launching source instance: You are not authorized to perform this operation. ==> amazon-ebs: Error launching source instance: You are not authorized to perform this operation.
You can read more about why this happens on the [Amazon Security Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission). The example policy below may help packer work with IAM roles. Note that this example provides more than the minimal set of permissions needed for packer to work, but specifics will depend on your use-case. You can read more about why this happens on the [Amazon Security
Blog](http://blogs.aws.amazon.com/security/post/Tx3M0IFB5XBOCQX/Granting-Permission-to-Launch-EC2-Instances-with-IAM-Roles-PassRole-Permission).
The example policy below may help packer work with IAM roles. Note that this
example provides more than the minimal set of permissions needed for packer to
work, but specifics will depend on your use-case.
```json ``` {.json}
{ {
"Sid": "PackerIAMPassRole", "Sid": "PackerIAMPassRole",
"Effect": "Allow", "Effect": "Allow",
......
--- ---
layout: "docs" description: |
page_title: "Custom Builder" Packer is extensible, allowing you to write new builders without having to
description: |- modify the core source code of Packer itself. Documentation for creating new
Packer is extensible, allowing you to write new builders without having to modify the core source code of Packer itself. Documentation for creating new builders is covered in the custom builders page of the Packer plugin section. builders is covered in the custom builders page of the Packer plugin section.
--- layout: docs
page_title: Custom Builder
...
# Custom Builder # Custom Builder
Packer is extensible, allowing you to write new builders without having to Packer is extensible, allowing you to write new builders without having to
modify the core source code of Packer itself. Documentation for creating modify the core source code of Packer itself. Documentation for creating new
new builders is covered in the [custom builders](/docs/extend/builder.html) builders is covered in the [custom builders](/docs/extend/builder.html) page of
page of the Packer plugin section. the Packer plugin section.
--- ---
layout: "docs" description: |
page_title: "DigitalOcean Builder" The `digitalocean` Packer builder is able to create new images for use with
description: |- DigitalOcean. The builder takes a source image, runs any provisioning necessary
The `digitalocean` Packer builder is able to create new images for use with DigitalOcean. The builder takes a source image, runs any provisioning necessary on the image after launching it, then snapshots it into a reusable image. This reusable image can then be used as the foundation of new servers that are launched within DigitalOcean. on the image after launching it, then snapshots it into a reusable image. This
--- reusable image can then be used as the foundation of new servers that are
launched within DigitalOcean.
layout: docs
page_title: DigitalOcean Builder
...
# DigitalOcean Builder # DigitalOcean Builder
Type: `digitalocean` Type: `digitalocean`
The `digitalocean` Packer builder is able to create new images for use with The `digitalocean` Packer builder is able to create new images for use with
[DigitalOcean](http://www.digitalocean.com). The builder takes a source [DigitalOcean](http://www.digitalocean.com). The builder takes a source image,
image, runs any provisioning necessary on the image after launching it, runs any provisioning necessary on the image after launching it, then snapshots
then snapshots it into a reusable image. This reusable image can then be it into a reusable image. This reusable image can then be used as the foundation
used as the foundation of new servers that are launched within DigitalOcean. of new servers that are launched within DigitalOcean.
The builder does _not_ manage images. Once it creates an image, it is up to The builder does *not* manage images. Once it creates an image, it is up to you
you to use it or delete it. to use it or delete it.
## Configuration Reference ## Configuration Reference
@@ -25,50 +29,55 @@
each category, the available configuration keys are alphabetized. each category, the available configuration keys are alphabetized.
In addition to the options listed here, a In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) [communicator](/docs/templates/communicator.html) can be configured for this
can be configured for this builder. builder.
### Required: ### Required:
* `api_token` (string) - The client TOKEN to use to access your account. - `api_token` (string) - The client TOKEN to use to access your account. It
It can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`, if set. can also be specified via environment variable `DIGITALOCEAN_API_TOKEN`,
if set.
* `image` (string) - The name (or slug) of the base image to use. This is the - `image` (string) - The name (or slug) of the base image to use. This is the
image that will be used to launch a new droplet and provision it. image that will be used to launch a new droplet and provision it. See
See https://developers.digitalocean.com/documentation/v2/#list-all-images for details on how to get a list of the accepted image names/slugs. https://developers.digitalocean.com/documentation/v2/\#list-all-images for
details on how to get a list of the accepted image names/slugs.
* `region` (string) - The name (or slug) of the region to launch the droplet in. - `region` (string) - The name (or slug) of the region to launch the
Consequently, this is the region where the snapshot will be available. droplet in. Consequently, this is the region where the snapshot will
See https://developers.digitalocean.com/documentation/v2/#list-all-regions for the accepted region names/slugs. be available. See
https://developers.digitalocean.com/documentation/v2/\#list-all-regions for
the accepted region names/slugs.
* `size` (string) - The name (or slug) of the droplet size to use. - `size` (string) - The name (or slug) of the droplet size to use. See
See https://developers.digitalocean.com/documentation/v2/#list-all-sizes for the accepted size names/slugs. https://developers.digitalocean.com/documentation/v2/\#list-all-sizes for
the accepted size names/slugs.
### Optional: ### Optional:
* `droplet_name` (string) - The name assigned to the droplet. DigitalOcean - `droplet_name` (string) - The name assigned to the droplet. DigitalOcean
sets the hostname of the machine to this value. sets the hostname of the machine to this value.
* `private_networking` (boolean) - Set to `true` to enable private networking - `private_networking` (boolean) - Set to `true` to enable private networking
for the droplet being created. This defaults to `false`, or not enabled. for the droplet being created. This defaults to `false`, or not enabled.
* `snapshot_name` (string) - The name of the resulting snapshot that will - `snapshot_name` (string) - The name of the resulting snapshot that will
appear in your account. This must be unique. appear in your account. This must be unique. To help make this unique, use a
To help make this unique, use a function like `timestamp` (see function like `timestamp` (see [configuration
[configuration templates](/docs/templates/configuration-templates.html) for more info) templates](/docs/templates/configuration-templates.html) for more info)
* `state_timeout` (string) - The time to wait, as a duration string, - `state_timeout` (string) - The time to wait, as a duration string, for a
for a droplet to enter a desired state (such as "active") before droplet to enter a desired state (such as "active") before timing out. The
timing out. The default state timeout is "6m". default state timeout is "6m".
* `user_data` (string) - User data to launch with the Droplet. - `user_data` (string) - User data to launch with the Droplet.
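As a rough sketch of how the optional keys combine with the required ones (the slugs and names here are illustrative, so check the DigitalOcean API lists referenced above for current values):

``` {.javascript}
{
  "type": "digitalocean",
  "api_token": "YOUR API KEY",
  "image": "ubuntu-14-04-x64",
  "region": "nyc3",
  "size": "512mb",
  "droplet_name": "packer-build",
  "private_networking": true,
  "snapshot_name": "packer-{{timestamp}}",
  "state_timeout": "10m"
}
```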
## Basic Example ## Basic Example
Here is a basic example. It is completely valid as soon as you enter your Here is a basic example. It is completely valid as soon as you enter your own
own access tokens: access tokens:
```javascript ``` {.javascript}
{ {
"type": "digitalocean", "type": "digitalocean",
"api_token": "YOUR API KEY", "api_token": "YOUR API KEY",
......
--- ---
layout: "docs" description: |
page_title: "Docker Builder" The `docker` Packer builder builds Docker images using Docker. The builder
description: |- starts a Docker container, runs provisioners within this container, then exports
The `docker` Packer builder builds Docker images using Docker. The builder starts a Docker container, runs provisioners within this container, then exports the container for reuse or commits the image. the container for reuse or commits the image.
--- layout: docs
page_title: Docker Builder
...
# Docker Builder # Docker Builder
Type: `docker` Type: `docker`
The `docker` Packer builder builds [Docker](http://www.docker.io) images using The `docker` Packer builder builds [Docker](http://www.docker.io) images using
Docker. The builder starts a Docker container, runs provisioners within Docker. The builder starts a Docker container, runs provisioners within this
this container, then exports the container for reuse or commits the image. container, then exports the container for reuse or commits the image.
Packer builds Docker containers _without_ the use of Packer builds Docker containers *without* the use of
[Dockerfiles](https://docs.docker.com/reference/builder/). [Dockerfiles](https://docs.docker.com/reference/builder/). By not using
By not using Dockerfiles, Packer is able to provision Dockerfiles, Packer is able to provision containers with portable scripts or
containers with portable scripts or configuration management systems configuration management systems that are not tied to Docker in any way. It also
that are not tied to Docker in any way. It also has a simpler mental model: has a simpler mental model: you provision containers much the same way you
you provision containers much the same way you provision a normal virtualized provision a normal virtualized or dedicated server. For more information, read
or dedicated server. For more information, read the section on the section on [Dockerfiles](#toc_8).
[Dockerfiles](#toc_8).
The Docker builder must run on a machine that has Docker installed. Therefore The Docker builder must run on a machine that has Docker installed. Therefore
the builder only works on machines that support Docker (modern Linux machines). the builder only works on machines that support Docker (modern Linux machines).
If you want to use Packer to build Docker containers on another platform, If you want to use Packer to build Docker containers on another platform, use
use [Vagrant](http://www.vagrantup.com) to start a Linux environment, then [Vagrant](http://www.vagrantup.com) to start a Linux environment, then run
run Packer within that environment. Packer within that environment.
## Basic Example: Export ## Basic Example: Export
Below is a fully functioning example. It doesn't do anything useful, since Below is a fully functioning example. It doesn't do anything useful, since no
no provisioners are defined, but it will effectively repackage an image. provisioners are defined, but it will effectively repackage an image.
```javascript ``` {.javascript}
{ {
"type": "docker", "type": "docker",
"image": "ubuntu", "image": "ubuntu",
@@ -43,11 +44,11 @@
## Basic Example: Commit ## Basic Example: Commit
Below is another example, the same as above but instead of exporting the Below is another example, the same as above but instead of exporting the running
running container, this one commits the container to an image. The image container, this one commits the container to an image. The image can then be
can then be more easily tagged, pushed, etc. more easily tagged, pushed, etc.
```javascript ``` {.javascript}
{ {
"type": "docker", "type": "docker",
"image": "ubuntu", "image": "ubuntu",
@@ -55,7 +56,6 @@
} }
``` ```
## Configuration Reference ## Configuration Reference
Configuration options are organized below into two categories: required and Configuration options are organized below into two categories: required and
@@ -63,47 +63,47 @@
described. described.
In addition to the options listed here, a In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) [communicator](/docs/templates/communicator.html) can be configured for this
can be configured for this builder. builder.
### Required: ### Required:
* `commit` (boolean) - If true, the container will be committed to an - `commit` (boolean) - If true, the container will be committed to an image
image rather than exported. This cannot be set if `export_path` is set. rather than exported. This cannot be set if `export_path` is set.
* `export_path` (string) - The path where the final container will be exported - `export_path` (string) - The path where the final container will be exported
as a tar file. This cannot be set if `commit` is set to true. as a tar file. This cannot be set if `commit` is set to true.
* `image` (string) - The base image for the Docker container that will - `image` (string) - The base image for the Docker container that will
be started. This image will be pulled from the Docker registry if it be started. This image will be pulled from the Docker registry if it doesn't
doesn't already exist. already exist.
### Optional: ### Optional:
* `login` (boolean) - Defaults to false. If true, the builder will - `login` (boolean) - Defaults to false. If true, the builder will login in
login in order to pull the image. The builder only logs in for the order to pull the image. The builder only logs in for the duration of
duration of the pull. It always logs out afterwards. the pull. It always logs out afterwards.
* `login_email` (string) - The email to use to authenticate to login. - `login_email` (string) - The email to use to authenticate to login.
* `login_username` (string) - The username to use to authenticate to login. - `login_username` (string) - The username to use to authenticate to login.
* `login_password` (string) - The password to use to authenticate to login. - `login_password` (string) - The password to use to authenticate to login.
* `login_server` (string) - The server address to login to. - `login_server` (string) - The server address to login to.
* `pull` (boolean) - If true, the configured image will be pulled using - `pull` (boolean) - If true, the configured image will be pulled using
`docker pull` prior to use. Otherwise, it is assumed the image already `docker pull` prior to use. Otherwise, it is assumed the image already
exists and can be used. This defaults to true if not set. exists and can be used. This defaults to true if not set.
* `run_command` (array of strings) - An array of arguments to pass to - `run_command` (array of strings) - An array of arguments to pass to
`docker run` in order to run the container. By default this is set to `docker run` in order to run the container. By default this is set to
`["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. `["-d", "-i", "-t", "{{.Image}}", "/bin/bash"]`. As you can see, you have a
As you can see, you have a couple template variables to customize, as well. couple template variables to customize, as well.
* `volumes` (map of strings to strings) - A mapping of additional volumes - `volumes` (map of strings to strings) - A mapping of additional volumes to
to mount into this container. The key of the object is the host path, mount into this container. The key of the object is the host path, the value
the value is the container path. is the container path.
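As a hedged illustration of the last two options, the sketch below commits an image while overriding `run_command` (restating the default here) and bind-mounting a host directory through `volumes`; the paths are placeholders:

``` {.javascript}
{
  "type": "docker",
  "image": "ubuntu",
  "commit": true,
  "pull": false,
  "run_command": ["-d", "-i", "-t", "{{.Image}}", "/bin/bash"],
  "volumes": {
    "/var/cache/apt": "/var/cache/apt"
  }
}
```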
## Using the Artifact: Export ## Using the Artifact: Export
@@ -113,27 +113,26 @@
[docker-push](/docs/post-processors/docker-push.html) post-processors. [docker-push](/docs/post-processors/docker-push.html) post-processors.
**Note:** This section is covering how to use an artifact that has been **Note:** This section is covering how to use an artifact that has been
_exported_. More specifically, if you set `export_path` in your configuration. *exported*. More specifically, if you set `export_path` in your configuration.
If you set `commit`, see the next section. If you set `commit`, see the next section.
The example below shows a full configuration that would import and push The example below shows a full configuration that would import and push the
the created image. This is accomplished using a sequence definition (a created image. This is accomplished using a sequence definition (a collection of
collection of post-processors that are treated as a single pipeline, see post-processors that are treated as a single pipeline, see
[Post-Processors](/docs/templates/post-processors.html) [Post-Processors](/docs/templates/post-processors.html) for more information):
for more information):
```javascript ``` {.javascript}
{ {
"post-processors": [ "post-processors": [
[ [
{ {
"type": "docker-import", "type": "docker-import",
"repository": "mitchellh/packer", "repository": "mitchellh/packer",
"tag": "0.7" "tag": "0.7"
}, },
"docker-push" "docker-push"
] ]
] ]
} }
``` ```
@@ -143,10 +142,10 @@
docker image is then passed on to the `docker-push` post-processor which handles docker image is then passed on to the `docker-push` post-processor which handles
pushing the image to a container repository. pushing the image to a container repository.
If you want to do this manually, however, perhaps from a script, you can If you want to do this manually, however, perhaps from a script, you can import
import the image using the process below: the image using the process below:
```text ``` {.text}
$ docker import - registry.mydomain.com/mycontainer:latest < artifact.tar $ docker import - registry.mydomain.com/mycontainer:latest < artifact.tar
``` ```
@@ -157,23 +156,22 @@
If you committed your container to an image, you probably want to tag, save, If you committed your container to an image, you probably want to tag, save,
push, etc. Packer can do this automatically for you. An example is shown below push, etc. Packer can do this automatically for you. An example is shown below
which tags and pushes an image. This is accomplished using a sequence which tags and pushes an image. This is accomplished using a sequence definition
definition (a collection of post-processors that are treated as as single (a collection of post-processors that are treated as as single pipeline, see
pipeline, see [Post-Processors](/docs/templates/post-processors.html) for more [Post-Processors](/docs/templates/post-processors.html) for more information):
information):
``` {.javascript}
{
  "post-processors": [
    [
      {
        "type": "docker-tag",
        "repository": "mitchellh/packer",
        "tag": "0.7"
      },
      "docker-push"
    ]
  ]
}
```

@@ -187,52 +185,52 @@ Going a step further, if you wanted to tag and push an image to multiple
container repositories, this could be accomplished by defining two,
nearly-identical sequence definitions, as demonstrated by the example below:

``` {.javascript}
{
  "post-processors": [
    [
      {
        "type": "docker-tag",
        "repository": "mitchellh/packer",
        "tag": "0.7"
      },
      "docker-push"
    ],
    [
      {
        "type": "docker-tag",
        "repository": "hashicorp/packer",
        "tag": "0.7"
      },
      "docker-push"
    ]
  ]
}
```

## Dockerfiles

This builder allows you to build Docker images *without* Dockerfiles.

With this builder, you can repeatably create Docker images without the use of a
Dockerfile. You don't need to know the syntax or semantics of Dockerfiles.
Instead, you can just provide shell scripts, Chef recipes, Puppet manifests,
etc. to provision your Docker container just like you would a regular
virtualized or dedicated machine.

While Docker has many features, Packer views Docker simply as an LXC container
runner. To that end, Packer is able to repeatably build these LXC containers
using portable provisioning scripts.

Dockerfiles have some additional features that Packer doesn't support, but
which can be worked around. Many of these features will be automated by Packer
in the future:

-   Dockerfiles will snapshot the container at each step, allowing you to go
    back to any step in the history of building. Packer doesn't do this yet, but
    inter-step snapshotting is on the way.

-   Dockerfiles can contain information such as exposed ports, shared volumes,
    and other metadata. Packer builds a raw Docker container image that has none
    of this metadata. You can pass in much of this metadata at runtime with
    `docker run`, as shown below.

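As a rough illustration of passing that metadata at runtime, the port mapping,
volume, and image name below are placeholders, not values from this page:

``` {.text}
$ docker run -d \
    -p 8080:80 \
    -v /opt/app-data:/data \
    mitchellh/packer:0.7
```
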
---
description: |
    The `null` Packer builder is not really a builder; it just sets up an SSH
    connection and runs the provisioners. It can be used to debug provisioners
    without incurring high wait times. It does not create any kind of image or
    artifact.
layout: docs
page_title: Null Builder
...

# Null Builder

Type: `null`

The `null` Packer builder is not really a builder; it just sets up an SSH
connection and runs the provisioners. It can be used to debug provisioners
without incurring high wait times. It does not create any kind of image or
artifact.

## Basic Example

Below is a fully functioning example. It doesn't do anything useful, since no
provisioners are defined, but it will connect to the specified host via ssh.

``` {.javascript}
{
  "type": "null",
  "ssh_host": "127.0.0.1",
```

@@ -31,4 +35,3 @@ no provisioners are defined, but it will connect to the specified host via ssh.

The null builder has no configuration parameters other than the
[communicator](/docs/templates/communicator.html) settings.

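For reference, a minimal null builder template might look like the following
sketch; it assumes password-based SSH, and the host and credentials shown are
placeholders:

``` {.javascript}
{
  "builders": [
    {
      "type": "null",
      "ssh_host": "127.0.0.1",
      "ssh_username": "foo",
      "ssh_password": "bar"
    }
  ]
}
```
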
---
description: |
    The `openstack` Packer builder is able to create new images for use with
    OpenStack. The builder takes a source image, runs any provisioning necessary on
    the image after launching it, then creates a new reusable image. This reusable
    image can then be used as the foundation of new servers that are launched within
    OpenStack. The builder will create temporary keypairs that provide temporary
    access to the server while the image is being created. This simplifies
    configuration quite a bit.
layout: docs
page_title: OpenStack Builder
...

# OpenStack Builder

Type: `openstack`

The `openstack` Packer builder is able to create new images for use with
[OpenStack](http://www.openstack.org). The builder takes a source image, runs
any provisioning necessary on the image after launching it, then creates a new
reusable image. This reusable image can then be used as the foundation of new
servers that are launched within OpenStack. The builder will create temporary
keypairs that provide temporary access to the server while the image is being
created. This simplifies configuration quite a bit.

The builder does *not* manage images. Once it creates an image, it is up to you
to use it or delete it.

## Configuration Reference ## Configuration Reference
...@@ -28,81 +33,79 @@ segmented below into two categories: required and optional parameters. Within ...@@ -28,81 +33,79 @@ segmented below into two categories: required and optional parameters. Within
each category, the available configuration keys are alphabetized. each category, the available configuration keys are alphabetized.
In addition to the options listed here, a In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) [communicator](/docs/templates/communicator.html) can be configured for this
can be configured for this builder. builder.
### Required: ### Required:
-   `flavor` (string) - The ID, name, or full URL for the desired flavor for the
    server to be created.

-   `image_name` (string) - The name of the resulting image.

-   `source_image` (string) - The ID or full URL to the base image to use. This
    is the image that will be used to launch a new server and provision it.
    Unless you specify completely custom SSH settings, the source image must
    have `cloud-init` installed so that the keypair gets assigned properly.

-   `username` (string) - The username used to connect to the OpenStack service.
    If not specified, Packer will use the environment variable `OS_USERNAME`,
    if set.

-   `password` (string) - The password used to connect to the OpenStack service.
    If not specified, Packer will use the environment variable `OS_PASSWORD`,
    if set.

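Putting just the required keys above together, a minimal `openstack` builder
block might look like the sketch below. The flavor, image ID, credentials, and
image name are placeholders, and in practice the username and password are
often supplied through the `OS_*` environment variables instead:

``` {.javascript}
{
  "type": "openstack",
  "username": "foo",
  "password": "bar",
  "flavor": "m1.small",
  "source_image": "6a60bf2f-ee48-4e56-b4ff-d27cb69deb98",
  "image_name": "packer-example"
}
```
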
### Optional:

-   `api_key` (string) - The API key used to access OpenStack. Some OpenStack
    installations require this.

-   `availability_zone` (string) - The availability zone to launch the server
    in. If this isn't specified, the default enforced by your OpenStack cluster
    will be used. This may be required for some OpenStack clusters.

-   `floating_ip` (string) - A specific floating IP to assign to this instance.
    `use_floating_ip` must also be set to true for this to have an effect.

-   `floating_ip_pool` (string) - The name of the floating IP pool to use to
    allocate a floating IP. `use_floating_ip` must also be set to true for this
    to have an effect.

-   `insecure` (boolean) - Whether or not the connection to OpenStack can be
    done over an insecure connection. By default this is false.

-   `networks` (array of strings) - A list of networks by UUID to attach to
    this instance.

-   `tenant_id` or `tenant_name` (string) - The tenant ID or name to boot the
    instance into. Some OpenStack installations require this. If not specified,
    Packer will use the environment variable `OS_TENANT_NAME`, if set.

-   `security_groups` (array of strings) - A list of security groups by name to
    add to this instance.

-   `region` (string) - The name of the region, such as "DFW", in which to
    launch the server to create the AMI. If not specified, Packer will use the
    environment variable `OS_REGION_NAME`, if set.

-   `ssh_interface` (string) - The type of interface to connect via SSH. Values
    useful for Rackspace are "public" or "private", and the default behavior is
    to connect via whichever is returned first from the OpenStack API.

-   `use_floating_ip` (boolean) - Whether or not to use a floating IP for the
    instance. Defaults to false.

-   `rackconnect_wait` (boolean) - For Rackspace, whether or not to wait for
    RackConnect to assign the machine an IP address before connecting via SSH.
    Defaults to false.

## Basic Example: Rackspace public cloud

Here is a basic example. This is a working example to build an Ubuntu 12.04 LTS
(Precise Pangolin) image on the Rackspace OpenStack cloud offering.

``` {.javascript}
{
  "type": "openstack",
  "username": "foo",
```

@@ -117,10 +120,10 @@

## Basic Example: Private OpenStack cloud

This example builds an Ubuntu 14.04 image on a private OpenStack cloud, powered
by Metacloud.

``` {.javascript}
{
  "type": "openstack",
  "ssh_username": "root",
@@ -130,12 +133,12 @@
}
```

In this case, the connection information for connecting to OpenStack doesn't
appear in the template. That is because I source a standard OpenStack script
with environment variables set before I run this. This script is setting
environment variables like:

-   `OS_AUTH_URL`
-   `OS_TENANT_ID`
-   `OS_USERNAME`
-   `OS_PASSWORD`

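Such a script is usually the `openrc`-style file that an OpenStack dashboard
lets you download. A hedged sketch of that workflow, where the script and
template file names are placeholders:

``` {.text}
$ source ./my-cloud-openrc.sh   # exports OS_AUTH_URL, OS_TENANT_ID, OS_USERNAME, OS_PASSWORD
$ packer build openstack.json
```
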
---
description: |
    The Parallels Packer builder is able to create Parallels Desktop for Mac virtual
    machines and export them in the PVM format, starting from an ISO image.
layout: docs
page_title: 'Parallels Builder (from an ISO)'
...

# Parallels Builder (from an ISO)

Type: `parallels-iso`

The Parallels Packer builder is able to create [Parallels Desktop for
Mac](http://www.parallels.com/products/desktop/) virtual machines and export
them in the PVM format, starting from an ISO image.

The builder builds a virtual machine by creating a new virtual machine from
scratch, booting it, installing an OS, provisioning software within the OS, then
shutting it down. The result of the Parallels builder is a directory containing
all the files necessary to run the virtual machine portably.

## Basic Example

Here is a basic example. This example is not functional. It will start the OS
installer but then fail because we don't provide the preseed file for Ubuntu to
self-install. Still, the example serves to show the basic configuration:

``` {.javascript}
{
  "type": "parallels-iso",
  "guest_os_type": "ubuntu",
@@ -40,219 +40,222 @@
}
```

It is important to add a `shutdown_command`. By default Packer halts the virtual
machine and the file system may not be sync'd. Thus, changes made in a
provisioner might not be saved.

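One common approach for Linux guests is a sudo-driven shutdown. The following
is a sketch only; it assumes the SSH user's password is literally "packer" and
that the user is allowed to run `shutdown` via `sudo`:

``` {.javascript}
{
  "shutdown_command": "echo 'packer' | sudo -S shutdown -P now"
}
```
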
## Configuration Reference

There are many configuration options available for the Parallels builder. They
are organized below into two categories: required and optional. Within each
category, the available options are alphabetized and described.

In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) can be configured for this
builder.

### Required:

-   `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO
    files are so large, this is required and Packer will verify it prior to
    booting a virtual machine with the ISO attached. The type of the checksum is
    specified with `iso_checksum_type`, documented below.

-   `iso_checksum_type` (string) - The type of the checksum specified in
    `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or
    "sha512" currently. While "none" will skip checksumming, this is not
    recommended since ISO files are generally large and corruption does happen
    from time to time.

-   `iso_url` (string) - A URL to the ISO containing the installation image.
    This URL can be either an HTTP URL or a file URL (or path to a file). If
    this is an HTTP URL, Packer will download it and cache it between runs.

-   `ssh_username` (string) - The username to use to SSH into the machine once
    the OS is installed.

-   `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
    install into the VM. Valid values are "win", "lin", "mac", "os2" and
    "other". This can be omitted only if `parallels_tools_mode` is "disable".

### Optional:

-   `boot_command` (array of strings) - This is an array of commands to type
    when the virtual machine is first booted. The goal of these commands should
    be to type just enough to initialize the operating system installer. Special
    keys can be typed as well, and are covered in the section below on the boot
    command. If this is not specified, it is assumed the installer will start
    itself.

-   `boot_wait` (string) - The time to wait after booting the initial virtual
    machine before typing the `boot_command`. The value of this should be a
    duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
    seconds and one minute 30 seconds, respectively. If this isn't specified,
    the default is 10 seconds.

-   `disk_size` (integer) - The size, in megabytes, of the hard disk to create
    for the VM. By default, this is 40000 (about 40 GB).

-   `floppy_files` (array of strings) - A list of files to place onto a floppy
    disk that is attached when the VM is booted. This is most useful for
    unattended Windows installs, which look for an `Autounattend.xml` file on
    removable media. By default, no floppy will be attached. All files listed in
    this setting get placed into the root directory of the floppy and the floppy
    is attached as the first floppy device. Currently, no support exists for
    creating sub-directories on the floppy. Wildcard characters (\*, ?, and
    \[\]) are allowed. Directory names are also allowed, which will add all the
    files found in the directory to the floppy.

-   `guest_os_type` (string) - The guest OS type being installed. By default
    this is "other", but you can get *dramatic* performance improvements by
    setting this to the proper value. To view all available values for this run
    `prlctl create x --distribution list`. Setting the correct value hints to
    Parallels Desktop how to optimize the virtual hardware to work best with
    that operating system.

-   `hard_drive_interface` (string) - The type of controller that the hard
    drives are attached to, defaults to "sata". Valid options are "sata", "ide",
    and "scsi".

-   `host_interfaces` (array of strings) - A list of which interfaces on the
    host should be searched for an IP address. The first IP address found on one
    of these will be used as `{{ .HTTPIP }}` in the `boot_command`. Defaults to
    \["en0", "en1", "en2", "en3", "en4", "en5", "en6", "en7", "en8", "en9",
    "ppp0", "ppp1", "ppp2"\].

-   `http_directory` (string) - Path to a directory to serve using an HTTP
    server. The files in this directory will be available over HTTP and will be
    requestable from the virtual machine. This is useful for hosting kickstart
    files and so on. By default this is "", which means no HTTP server will be
    started. The address and port of the HTTP server will be available as
    variables in `boot_command`. This is covered in more detail below.

-   `http_port_min` and `http_port_max` (integer) - These are the minimum and
    maximum port to use for the HTTP server started to serve the
    `http_directory`. Because Packer often runs in parallel, Packer will choose
    a randomly available port in this range to run the HTTP server. If you want
    to force the HTTP server to be on one port, make this minimum and maximum
    port the same. By default the values are 8000 and 9000, respectively.

-   `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
    Packer will try these in order. If anything goes wrong attempting to
    download or while downloading a single URL, it will move on to the next. All
    URLs must point to the same file (same checksum). By default this is empty
    and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.

-   `output_directory` (string) - This is the path to the directory where the
    resulting virtual machine will be created. This may be relative or absolute.
    If relative, the path is relative to the working directory when `packer` is
    executed. This directory must not exist or be empty prior to running the
    builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name
    of the build.

-   `parallels_tools_guest_path` (string) - The path in the virtual machine to
    upload Parallels Tools. This only takes effect if `parallels_tools_mode` is
    "upload". This is a [configuration
    template](/docs/templates/configuration-templates.html) that has a single
    valid variable: `Flavor`, which will be the value of
    `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso"
    which should upload into the login directory of the user.

-   `parallels_tools_mode` (string) - The method by which Parallels Tools are
    made available to the guest for installation. Valid options are "upload",
    "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
    be attached as a CD device to the virtual machine. If the mode is "upload"
    the Parallels Tools ISO will be uploaded to the path specified by
    `parallels_tools_guest_path`. The default value is "upload".

-   `prlctl` (array of array of strings) - Custom `prlctl` commands to execute
    in order to further customize the virtual machine being created. The value
    of this is an array of commands to execute. The commands are executed in the
    order defined in the template. For each command, the command is defined
    itself as an array of strings, where each string represents a single
    argument on the command-line to `prlctl` (but excluding `prlctl` itself).
    Each arg is treated as a [configuration
    template](/docs/templates/configuration-templates.html), where the `Name`
    variable is replaced with the VM name. More details on how to use `prlctl`
    are below.

-   `prlctl_post` (array of array of strings) - Identical to `prlctl`, except
    that it is run after the virtual machine is shut down, and before the
    virtual machine is exported.

-   `prlctl_version_file` (string) - The path within the virtual machine to
    upload a file that contains the `prlctl` version that was used to create the
    machine. This information can be useful for provisioning. By default this is
    ".prlctl\_version", which will generally upload it into the home directory.

-   `shutdown_command` (string) - The command to use to gracefully shut down the
    machine once all the provisioning is done. By default this is an empty
    string, which tells Packer to just forcefully shut down the machine.

-   `shutdown_timeout` (string) - The amount of time to wait after executing the
    `shutdown_command` for the virtual machine to actually shut down. If it
    doesn't shut down in this time, it is an error. By default, the timeout is
    "5m", or five minutes.

-   `vm_name` (string) - This is the name of the PVM directory for the new
    virtual machine, without the file extension. By default this is
    "packer-BUILDNAME", where "BUILDNAME" is the name of the build.

## Boot Command

The `boot_command` configuration is very important: it specifies the keys to
type when the virtual machine is first booted in order to start the OS
installer. This command is typed after `boot_wait`, which gives the virtual
machine some time to actually load the ISO.

As documented above, the `boot_command` is an array of strings. The strings are
all typed in sequence. It is an array only to improve readability within the
template.

The boot command is "typed" character for character (using the Parallels
Virtualization SDK, see [Parallels Builder](/docs/builders/parallels.html)),
simulating a human actually typing on the keyboard. There is a set of special
keys available. If these are in your boot command, they will be replaced by the
proper key:

-   `<bs>` - Backspace
-   `<del>` - Delete
-   `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
-   `<esc>` - Simulates pressing the escape key.
-   `<tab>` - Simulates pressing the tab key.
-   `<f1>` - `<f12>` - Simulates pressing a function key.
-   `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
-   `<spacebar>` - Simulates pressing the spacebar.
-   `<insert>` - Simulates pressing the insert key.
-   `<home>` `<end>` - Simulates pressing the home and end keys.
-   `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
-   `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
    sending any additional keys. This is useful if you generally have to wait
    for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The
available variables are:

-   `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server
    that is started serving the directory specified by the `http_directory`
    configuration parameter. If `http_directory` isn't specified, these will be
    blank!

Example boot command. This is actually a working boot command used to start an
Ubuntu 12.04 installer:

``` {.text}
[
  "<esc><esc><enter><wait>",
  "/install/vmlinuz noapic ",
@@ -267,17 +270,18 @@
```

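If the installer should fetch a preseed file from Packer's built-in HTTP
server, the `HTTPIP` and `HTTPPort` variables are typically interpolated
directly into one of these strings. A hedged sketch, where the preseed file
name is a placeholder:

``` {.text}
"preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
```
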
## prlctl Commands

In order to perform extra customization of the virtual machine, a template can
define extra calls to `prlctl` to perform.

[prlctl](http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf)
is the command-line interface to Parallels Desktop. It can be used to configure
the virtual machine, such as setting RAM, CPUs, etc.

Extra `prlctl` commands are defined in the template in the `prlctl` section. An
example is shown below that sets the memory and number of CPUs within the
virtual machine:

``` {.javascript}
{
  "prlctl": [
    ["set", "{{.Name}}", "--memsize", "1024"],
```

@@ -291,7 +295,7 @@ executed in the order defined. So in the above example, the memory will be set
followed by the CPUs.

Each command itself is an array of strings, where each string is an argument to
`prlctl`. Each argument is treated as a [configuration
template](/docs/templates/configuration-templates.html). The only available
variable is `Name`, which is replaced with the unique name of the VM and is
required for many `prlctl` calls.

---
description: |
    This Parallels builder is able to create Parallels Desktop for Mac virtual
    machines and export them in the PVM format, starting from an existing PVM
    (exported virtual machine image).
layout: docs
page_title: 'Parallels Builder (from a PVM)'
...

# Parallels Builder (from a PVM)

Type: `parallels-pvm`

This Parallels builder is able to create [Parallels Desktop for
Mac](http://www.parallels.com/products/desktop/) virtual machines and export
them in the PVM format, starting from an existing PVM (exported virtual machine
image).

The builder builds a virtual machine by importing an existing PVM file. It then
boots this image, runs provisioners on this new VM, and exports that VM to
create the image. The imported machine is deleted prior to finishing the build.

## Basic Example

Here is a basic example. This example is functional if you have a PVM matching
the settings here.

``` {.javascript}
{
  "type": "parallels-pvm",
  "parallels_tools_flavor": "lin",
@@ -36,175 +37,183 @@
}
```

It is important to add a `shutdown_command`. By default Packer halts the virtual
machine and the file system may not be sync'd. Thus, changes made in a
provisioner might not be saved.

## Configuration Reference

There are many configuration options available for the Parallels builder. They
are organized below into two categories: required and optional. Within each
category, the available options are alphabetized and described.

In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) can be configured for this
builder.

### Required:

-   `source_path` (string) - The path to a PVM directory that acts as the source
    of this build.

-   `ssh_username` (string) - The username to use to SSH into the machine once
    the OS is installed.

-   `parallels_tools_flavor` (string) - The flavor of the Parallels Tools ISO to
    install into the VM. Valid values are "win", "lin", "mac", "os2" and
    "other". This can be omitted only if `parallels_tools_mode` is "disable".

### Optional:

-   `boot_command` (array of strings) - This is an array of commands to type
    when the virtual machine is first booted. The goal of these commands should
    be to type just enough to initialize the operating system installer. Special
    keys can be typed as well, and are covered in the section below on the boot
    command. If this is not specified, it is assumed the installer will start
    itself.

-   `boot_wait` (string) - The time to wait after booting the initial virtual
    machine before typing the `boot_command`. The value of this should be a
    duration. Examples are "5s" and "1m30s" which will cause Packer to wait five
    seconds and one minute 30 seconds, respectively. If this isn't specified,
    the default is 10 seconds.

-   `floppy_files` (array of strings) - A list of files to put onto a floppy
    disk that is attached when the VM is booted for the first time. This is most
    useful for unattended Windows installs, which look for an `Autounattend.xml`
    file on removable media. By default no floppy will be attached. The files
    listed in this configuration will all be put into the root directory of the
    floppy disk; sub-directories are not supported.

-   `reassign_mac` (boolean) - If this is "false" the MAC address of the first
    NIC will be reused when imported; otherwise a new MAC address will be
    generated by Parallels. Defaults to "false".

-   `output_directory` (string) - This is the path to the directory where the
    resulting virtual machine will be created. This may be relative or absolute.
    If relative, the path is relative to the working directory when `packer` is
    executed. This directory must not exist or be empty prior to running the
    builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name
    of the build.

-   `parallels_tools_guest_path` (string) - The path in the VM to upload
    Parallels Tools. This only takes effect if `parallels_tools_mode` is
    "upload". This is a [configuration
    template](/docs/templates/configuration-templates.html) that has a single
    valid variable: `Flavor`, which will be the value of
    `parallels_tools_flavor`. By default this is "prl-tools-{{.Flavor}}.iso"
    which should upload into the login directory of the user.

-   `parallels_tools_mode` (string) - The method by which Parallels Tools are
    made available to the guest for installation. Valid options are "upload",
    "attach", or "disable". If the mode is "attach" the Parallels Tools ISO will
    be attached as a CD device to the virtual machine. If the mode is "upload"
    the Parallels Tools ISO will be uploaded to the path specified by
    `parallels_tools_guest_path`. The default value is "upload".

-   `prlctl` (array of array of strings) - Custom `prlctl` commands to execute
    in order to further customize the virtual machine being created. The value
    of this is an array of commands to execute. The commands are executed in the
    order defined in the template. For each command, the command is defined
    itself as an array of strings, where each string represents a single
    argument on the command-line to `prlctl` (but excluding `prlctl` itself).
    Each arg is treated as a [configuration
    template](/docs/templates/configuration-templates.html), where the `Name`
    variable is replaced with the VM name. More details on how to use `prlctl`
    are below.

-   `prlctl_post` (array of array of strings) - Identical to `prlctl`, except
    that it is run after the virtual machine is shut down, and before the
    virtual machine is exported.

-   `prlctl_version_file` (string) - The path within the virtual machine to
    upload a file that contains the `prlctl` version that was used to create the
    machine. This information can be useful for provisioning. By default this is
    ".prlctl\_version", which will generally upload it into the home directory.

-   `shutdown_command` (string) - The command to use to gracefully shut down the
    machine once all the provisioning is done. By default this is an empty
    string, which tells Packer to just forcefully shut down the machine.

-   `shutdown_timeout` (string) - The amount of time to wait after executing the
    `shutdown_command` for the virtual machine to actually shut down. If it
    doesn't shut down in this time, it is an error. By default, the timeout is
    "5m", or five minutes.

-   `vm_name` (string) - This is the name of the virtual machine when it is
    imported as well as the name of the PVM directory when the virtual machine
    is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the
    name of the build.

## Parallels Tools

After the virtual machine is up and the operating system is installed, Packer
uploads the Parallels Tools into the virtual machine. The path where they are
uploaded is controllable by `parallels_tools_path`, and defaults to
"prl-tools.iso". Without an absolute path, it is uploaded to the home directory
of the SSH user. Parallels Tools ISOs can be found in: "/Applications/Parallels
Desktop.app/Contents/Resources/Tools/"

## Boot Command

The `boot_command` specifies the keys to type when the virtual machine is first
booted. This command is typed after `boot_wait`.

As documented above, the `boot_command` is an array of strings. The strings are
all typed in sequence. It is an array only to improve readability within the
template.

The boot command is "typed" character for character (using the Parallels
Virtualization SDK, see [Parallels Builder](/docs/builders/parallels.html)),
simulating a human actually typing on the keyboard. There is a set of special
keys available. If these are in your boot command, they will be replaced by the
proper key:

* `<bs>` - Backspace - `<bs>` - Backspace
* `<del>` - Delete - `<del>` - Delete
* `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress. - `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
* `<esc>` - Simulates pressing the escape key. - `<esc>` - Simulates pressing the escape key.
* `<tab>` - Simulates pressing the tab key. - `<tab>` - Simulates pressing the tab key.
* `<f1>` - `<f12>` - Simulates pressing a function key. - `<f1>` - `<f12>` - Simulates pressing a function key.
* `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key. - `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
* `<spacebar>` - Simulates pressing the spacebar. - `<spacebar>` - Simulates pressing the spacebar.
* `<insert>` - Simulates pressing the insert key. - `<insert>` - Simulates pressing the insert key.
* `<home>` `<end>` - Simulates pressing the home and end keys. - `<home>` `<end>` - Simulates pressing the home and end keys.
* `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys. - `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
* `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This - `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
is useful if you have to generally wait for the UI to update before typing more. sending any additional keys. This is useful if you have to generally wait
for the UI to update before typing more.
In addition to the special keys, each command to type is treated as a In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). [configuration template](/docs/templates/configuration-templates.html). The
The available variables are: available variables are:
## prlctl Commands

In order to perform extra customization of the virtual machine, a template can define extra calls to `prlctl` to perform.

[prlctl](http://download.parallels.com/desktop/v9/ga/docs/en_US/Parallels%20Command%20Line%20Reference%20Guide.pdf) is the command-line interface to Parallels Desktop. It can be used to configure the virtual machine, such as setting RAM, CPUs, etc.

Extra `prlctl` commands are defined in the template in the `prlctl` section. An example is shown below that sets the memory and number of CPUs within the virtual machine:

``` {.javascript}
{
  "prlctl": [
    ["set", "{{.Name}}", "--memsize", "1024"],
    ["set", "{{.Name}}", "--cpus", "2"]
  ]
}
```

These commands are executed in the order defined. So in the above example, the memory will be set followed by the CPUs.

Each command itself is an array of strings, where each string is an argument to `prlctl`. Each argument is treated as a [configuration template](/docs/templates/configuration-templates.html). The only available variable is `Name`, which is replaced with the unique name of the VM and is required for many `prlctl` calls.
---
description: |
    The Parallels Packer builder is able to create Parallels Desktop for Mac
    virtual machines and export them in the PVM format.
layout: docs
page_title: Parallels Builder
...
# Parallels Builder

The Parallels Packer builder is able to create [Parallels Desktop for Mac](http://www.parallels.com/products/desktop/) virtual machines and export them in the PVM format.

Packer actually comes with multiple builders able to create Parallels machines, depending on the strategy you want to use to build the image. Packer supports the following Parallels builders:

- [parallels-iso](/docs/builders/parallels-iso.html) - Starts from an ISO file, creates a brand new Parallels VM, installs an OS, provisions software within the OS, then exports that machine to create an image. This is best for people who want to start from scratch.

- [parallels-pvm](/docs/builders/parallels-pvm.html) - This builder imports an existing PVM file, runs provisioners on top of that VM, and exports that machine to create an image. This is best if you have an existing Parallels VM export you want to use as the source. As an additional benefit, you can feed the artifact of this builder back into itself to iterate on a machine.
## Requirements

In addition to [Parallels Desktop for Mac](http://www.parallels.com/products/desktop/), this requires the [Parallels Virtualization SDK](http://www.parallels.com/downloads/desktop/).

The SDK can be installed by downloading it and following the instructions in the dmg.
---
description: |
    The Qemu Packer builder is able to create KVM and Xen virtual machine images.
    Support for Xen is experimental at this time.
layout: docs
page_title: QEMU Builder
...
# QEMU Builder

Type: `qemu`

The Qemu Packer builder is able to create [KVM](http://www.linux-kvm.org) and [Xen](http://www.xenproject.org) virtual machine images. Support for Xen is experimental at this time.

The builder builds a virtual machine by creating a new virtual machine from scratch, booting it, installing an OS, rebooting the machine with the boot media as the virtual hard drive, provisioning software within the OS, then shutting it down. The result of the Qemu builder is a directory containing the image file necessary to run the virtual machine on KVM or Xen.
## Basic Example

Here is a basic example. This example is functional so long as you fix up paths to files, URLs for ISOs, and checksums.

``` {.javascript}
{
  "builders":
  [
    // ...
  ]
}
```
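The body of the builder definition is elided in this diff hunk. As a rough, hedged sketch only (the ISO URL, checksum, and credential values below are placeholders, not taken from this page), a minimal `qemu` builder using options from this reference might look like:

``` {.javascript}
{
  "builders":
  [
    {
      "type": "qemu",
      "iso_url": "http://example.com/centos-6.4-x86_64-minimal.iso",
      "iso_checksum": "0123456789abcdef0123456789abcdef",
      "iso_checksum_type": "md5",
      "ssh_username": "root",
      "disk_size": 5000,
      "format": "qcow2",
      "accelerator": "kvm",
      "http_directory": "httpdir",
      "boot_wait": "10s",
      "shutdown_command": "shutdown -P now"
    }
  ]
}
```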
A working CentOS 6.x kickstart file can be found [at this URL](https://gist.github.com/mitchellh/7328271/#file-centos6-ks-cfg), adapted from an unknown source. Place this file in the http directory with the proper name. For the example above, it should go into "httpdir" with a name of "centos6-ks.cfg".
## Configuration Reference

There are many configuration options available for the Qemu builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described.

In addition to the options listed here, a [communicator](/docs/templates/communicator.html) can be configured for this builder.
### Required:

- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files are so large, this is required and Packer will verify it prior to booting a virtual machine with the ISO attached. The type of the checksum is specified with `iso_checksum_type`, documented below.

- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "md5", "sha1", "sha256", or "sha512" currently.

- `iso_url` (string) - A URL to the ISO containing the installation image. This URL can be either an HTTP URL or a file URL (or path to a file). If this is an HTTP URL, Packer will download it and cache it between runs.

- `ssh_username` (string) - The username to use to SSH into the machine once the OS is installed.
### Optional:

- `accelerator` (string) - The accelerator type to use when running the VM. This may have a value of either "none", "kvm", "tcg", or "xen", and you must have that support on the machine on which you run the builder. By default "kvm" is used.

- `boot_command` (array of strings) - This is an array of commands to type when the virtual machine is first booted. The goal of these commands should be to type just enough to initialize the operating system installer. Special keys can be typed as well, and are covered in the section below on the boot command. If this is not specified, it is assumed the installer will start itself.

- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be a duration. Examples are "5s" and "1m30s", which will cause Packer to wait five seconds and one minute 30 seconds, respectively. If this isn't specified, the default is 10 seconds.

- `disk_cache` (string) - The cache mode to use for disk. Allowed values include any of "writethrough", "writeback", "none", "unsafe" or "directsync". By default, this is set to "writeback".

- `disk_discard` (string) - The discard mode to use for disk. Allowed values include any of "unmap" or "ignore". By default, this is set to "ignore".

- `disk_image` (boolean) - Packer defaults to building from an ISO file; this parameter controls whether the ISO URL supplied is actually a bootable QEMU image. When this value is set to true, the machine will clone the source, resize it according to `disk_size` and boot the image.

- `disk_interface` (string) - The interface to use for the disk. Allowed values include any of "ide", "scsi" or "virtio". Note also that any boot commands or kickstart type scripts must have proper adjustments for resulting device names. The Qemu builder uses "virtio" by default.

- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for the VM. By default, this is 40000 (about 40 GB).

- `floppy_files` (array of strings) - A list of files to place onto a floppy disk that is attached when the VM is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on removable media. By default, no floppy will be attached. All files listed in this setting get placed into the root directory of the floppy and the floppy is attached as the first floppy device. Currently, no support exists for creating sub-directories on the floppy. Wildcard characters (*, ?, and []) are allowed. Directory names are also allowed, which will add all the files found in the directory to the floppy.

- `format` (string) - Either "qcow2" or "raw", this specifies the output format of the virtual machine image. This defaults to "qcow2".

- `headless` (boolean) - Packer defaults to building QEMU virtual machines by launching a GUI that shows the console of the machine being built. When this value is set to true, the machine will start without a console.

- `http_directory` (string) - Path to a directory to serve using an HTTP server. The files in this directory will be available over HTTP and will be requestable from the virtual machine. This is useful for hosting kickstart files and so on. By default this is "", which means no HTTP server will be started. The address and port of the HTTP server will be available as variables in `boot_command`. This is covered in more detail below.

- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP server to be on one port, make this minimum and maximum port the same. By default the values are 8000 and 9000, respectively.

- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to download or while downloading a single URL, it will move on to the next. All URLs must point to the same file (same checksum). By default this is empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.

- `machine_type` (string) - The type of machine emulation to use. Run your qemu binary with the flags `-machine help` to list available types for your system. This defaults to "pc".

- `net_device` (string) - The driver to use for the network interface. Allowed values are "ne2k_pci", "i82551", "i82557b", "i82559er", "rtl8139", "e1000", "pcnet" or "virtio". The Qemu builder uses "virtio" by default.

- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` is executed. This directory must not exist or be empty prior to running the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build.

- `qemu_binary` (string) - The name of the Qemu binary to look for. This defaults to "qemu-system-x86_64", but may need to be changed for some platforms. For example "qemu-kvm", or "qemu-system-i386" may be a better choice for some systems.

- `qemuargs` (array of array of strings) - Allows complete control over the qemu command line (though not, at this time, qemu-img). Each array of strings makes up a command line switch that overrides matching default switch/value pairs. Any value specified as an empty string is ignored. All values after the switch are concatenated with no separator.
~> **Warning:** The qemu command line allows extreme flexibility, so beware of conflicting arguments causing failures of your run. For instance, using --no-acpi could break the ability to send power signal type commands (e.g., shutdown -P now) to the virtual machine, thus preventing proper shutdown. To see the defaults, look in the packer.log file and search for the qemu-system-x86 command. The arguments are all printed for review.
The following shows a sample usage:

``` {.javascript}
// ...
"qemuargs": [
  [ "-m", "1024M" ],
  [ "--no-acpi", "" ],
  [ "-netdev", "user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport" ],
  [ "-device", "virtio-net,netdev=mynet0" ]
]
// ...
```
would produce the following (not including other defaults supplied by the builder and not otherwise conflicting with the qemuargs):

<pre class="prettyprint">
qemu-system-x86 -m 1024m --no-acpi -netdev user,id=mynet0,hostfwd=hostip:hostport-guestip:guestport -device virtio-net,netdev=mynet0
</pre>
- `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. By default this is an empty string, which tells Packer to just forcefully shut down the machine.

- `shutdown_timeout` (string) - The amount of time to wait after executing the `shutdown_command` for the virtual machine to actually shut down. If it doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes.

- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and maximum port to use for the SSH port on the host machine which is forwarded to the SSH port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port.

- `vm_name` (string) - This is the name of the image (QCOW2 or IMG) file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build.

- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port to use for the VNC port on the host machine which is forwarded to the VNC port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port.
## Boot Command

The `boot_command` configuration is very important: it specifies the keys to type when the virtual machine is first booted in order to start the OS installer. This command is typed after `boot_wait`, which gives the virtual machine some time to actually load the ISO.

As documented above, the `boot_command` is an array of strings. The strings are all typed in sequence. It is an array only to improve readability within the template.

The boot command is "typed" character for character over a VNC connection to the machine, simulating a human actually typing at the keyboard. There is a set of special keys available. If these are in your boot command, they will be replaced by the proper key:

- `<bs>` - Backspace
- `<del>` - Delete
- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
- `<esc>` - Simulates pressing the escape key.
- `<tab>` - Simulates pressing the tab key.
- `<f1>` - `<f12>` - Simulates pressing a function key.
- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
- `<spacebar>` - Simulates pressing the spacebar.
- `<insert>` - Simulates pressing the insert key.
- `<home>` `<end>` - Simulates pressing the home and end keys.
- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This is useful if you have to wait for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a [configuration template](/docs/templates/configuration-templates.html). The available variables are:
- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server that is started serving the directory specified by the `http_directory` configuration parameter. If `http_directory` isn't specified, these will be blank!

Example boot command. This is actually a working boot command used to start a CentOS 6.4 installer:

``` {.javascript}
"boot_command":
[
  "<tab><wait>",
  // ...
]
```
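The rest of this boot command is elided in the diff. As a hedged sketch of how such a command typically continues, assuming the kickstart file name from the earlier example and using the `HTTPIP` and `HTTPPort` variables described above:

``` {.javascript}
"boot_command":
[
  "<tab><wait>",
  " text ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/centos6-ks.cfg<enter>"
]
```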
---
description: |
    The VirtualBox Packer builder is able to create VirtualBox virtual machines
    and export them in the OVF format, starting from an ISO image.
layout: docs
page_title: 'VirtualBox Builder (from an ISO)'
...
# VirtualBox Builder (from an ISO)

Type: `virtualbox-iso`

The VirtualBox Packer builder is able to create [VirtualBox](https://www.virtualbox.org/) virtual machines and export them in the OVF format, starting from an ISO image.

The builder builds a virtual machine by creating a new virtual machine from scratch, booting it, installing an OS, provisioning software within the OS, then shutting it down. The result of the VirtualBox builder is a directory containing all the files necessary to run the virtual machine portably.
## Basic Example

Here is a basic example. This example is not functional. It will start the OS installer but then fail because we don't provide the preseed file for Ubuntu to self-install. Still, the example serves to show the basic configuration:

``` {.javascript}
{
  "type": "virtualbox-iso",
  "guest_os_type": "Ubuntu_64",
  // ...
}
```
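The middle of this example is elided in the diff hunk. Here is a minimal, hedged sketch of a `virtualbox-iso` builder using only options from this reference; the ISO URL, checksum, and credential values are placeholders, not taken from this page:

``` {.javascript}
{
  "type": "virtualbox-iso",
  "guest_os_type": "Ubuntu_64",
  "iso_url": "http://example.com/ubuntu-12.04.5-server-amd64.iso",
  "iso_checksum": "0123456789abcdef0123456789abcdef",
  "iso_checksum_type": "md5",
  "ssh_username": "packer",
  "shutdown_command": "echo 'packer' | sudo -S shutdown -P now"
}
```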
It is important to add a `shutdown_command`. By default Packer halts the virtual machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved.
## Configuration Reference

There are many configuration options available for the VirtualBox builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described.

In addition to the options listed here, a [communicator](/docs/templates/communicator.html) can be configured for this builder.
### Required:

- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files are so large, this is required and Packer will verify it prior to booting a virtual machine with the ISO attached. The type of the checksum is specified with `iso_checksum_type`, documented below.

- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time.

- `iso_url` (string) - A URL to the ISO containing the installation image. This URL can be either an HTTP URL or a file URL (or path to a file). If this is an HTTP URL, Packer will download it and cache it between runs.

- `ssh_username` (string) - The username to use to SSH into the machine once the OS is installed.
### Optional:

- `boot_command` (array of strings) - This is an array of commands to type when the virtual machine is first booted. The goal of these commands should be to type just enough to initialize the operating system installer. Special keys can be typed as well, and are covered in the section below on the boot command. If this is not specified, it is assumed the installer will start itself.

- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be a duration. Examples are "5s" and "1m30s", which will cause Packer to wait five seconds and one minute 30 seconds, respectively. If this isn't specified, the default is 10 seconds.

- `disk_size` (integer) - The size, in megabytes, of the hard disk to create for the VM. By default, this is 40000 (about 40 GB).

- `export_opts` (array of strings) - Additional options to pass to `VBoxManage export`. This can be useful for passing product information to include in the resulting appliance file.

- `floppy_files` (array of strings) - A list of files to place onto a floppy disk that is attached when the VM is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on removable media. By default, no floppy will be attached. All files listed in this setting get placed into the root directory of the floppy and the floppy is attached as the first floppy device. Currently, no support exists for creating sub-directories on the floppy. Wildcard characters (*, ?, and []) are allowed. Directory names are also allowed, which will add all the files found in the directory to the floppy.

- `format` (string) - Either "ovf" or "ova", this specifies the output format of the exported virtual machine. This defaults to "ovf".

- `guest_additions_mode` (string) - The method by which guest additions are made available to the guest for installation. Valid options are "upload", "attach", or "disable". If the mode is "attach", the guest additions ISO will be attached as a CD device to the virtual machine. If the mode is "upload", the guest additions ISO will be uploaded to the path specified by `guest_additions_path`. The default value is "upload". If "disable" is used, guest additions won't be downloaded, either.

- `guest_additions_path` (string) - The path on the guest virtual machine where the VirtualBox guest additions ISO will be uploaded. By default this is "VBoxGuestAdditions.iso", which should upload into the login directory of the user. This is a [configuration template](/docs/templates/configuration-templates.html) where the `Version` variable is replaced with the VirtualBox version.

- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions ISO that will be uploaded to the guest VM. By default the checksums will be downloaded from the VirtualBox website, so this only needs to be set if you want to be explicit about the checksum.

- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. This can also be a file URL if the ISO is at a local path. By default, the VirtualBox builder will attempt to find the guest additions ISO on the local file system. If it is not available locally, the builder will download the proper guest additions ISO from the internet.

- `guest_os_type` (string) - The guest OS type being installed. By default this is "other", but you can get *dramatic* performance improvements by setting this to the proper value. To view all available values for this, run `VBoxManage list ostypes`. Setting the correct value hints to VirtualBox how to optimize the virtual hardware to work best with that operating system.
- `hard_drive_interface` (string) - The type of controller that the primary hard drive is attached to, defaults to "ide". When set to "sata", the drive is attached to an AHCI SATA controller. When set to "scsi", the drive is attached to an LsiLogic SCSI controller.

- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines by launching a GUI that shows the console of the machine being built. When this value is set to true, the machine will start without a console.

- `http_directory` (string) - Path to a directory to serve using an HTTP server. The files in this directory will be available over HTTP and will be requestable from the virtual machine. This is useful for hosting kickstart files and so on. By default this is "", which means no HTTP server will be started. The address and port of the HTTP server will be available as variables in `boot_command`. This is covered in more detail below.

- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP server to be on one port, make this minimum and maximum port the same. By default the values are 8000 and 9000, respectively.

- `iso_interface` (string) - The type of controller that the ISO is attached to, defaults to "ide". When set to "sata", the drive is attached to an AHCI SATA controller.

- `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to download or while downloading a single URL, it will move on to the next. All URLs must point to the same file (same checksum). By default this is empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.

- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` is executed. This directory must not exist or be empty prior to running the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the name of the build.

- `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. By default this is an empty string, which tells Packer to just forcefully shut down the machine; if a shutdown command already takes place inside a script, this option may safely be omitted. If one or more scripts require a reboot, it is suggested to leave this blank (since reboots may fail) and to specify the final shutdown command in your last script.

- `shutdown_timeout` (string) - The amount of time to wait after executing the `shutdown_command` for the virtual machine to actually shut down. If it doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes.

- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and maximum port to use for the SSH port on the host machine which is forwarded to the SSH port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port.

- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer does not set up forwarded port mapping for SSH requests and uses `ssh_port` on the host to communicate to the virtual machine.
- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to execute in order to further customize the virtual machine being created. The value of this is an array of commands to execute. The commands are executed in the order defined in the template. For each command, the command is defined itself as an array of strings, where each string represents a single argument on the command-line to `VBoxManage` (but excluding `VBoxManage` itself). Each arg is treated as a [configuration template](/docs/templates/configuration-templates.html), where the `Name` variable is replaced with the VM name. More details on how to use `VBoxManage` are below.

- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, except that it is run after the virtual machine is shutdown, and before the virtual machine is exported.

- `virtualbox_version_file` (string) - The path within the virtual machine to upload a file that contains the VirtualBox version that was used to create the machine. This information can be useful for provisioning. By default this is ".vbox_version", which will generally upload it into the home directory.

- `vm_name` (string) - This is the name of the OVF file for the new virtual machine, without the file extension. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
## Boot Command ## Boot Command
The `boot_command` configuration is very important: it specifies the keys The `boot_command` configuration is very important: it specifies the keys to
to type when the virtual machine is first booted in order to start the type when the virtual machine is first booted in order to start the OS
OS installer. This command is typed after `boot_wait`, which gives the installer. This command is typed after `boot_wait`, which gives the virtual
virtual machine some time to actually load the ISO. machine some time to actually load the ISO.
As documented above, the `boot_command` is an array of strings. The As documented above, the `boot_command` is an array of strings. The strings are
strings are all typed in sequence. It is an array only to improve readability all typed in sequence. It is an array only to improve readability within the
within the template. template.
The boot command is "typed" character for character over a VNC connection The boot command is "typed" character for character over a VNC connection to the
to the machine, simulating a human actually typing the keyboard. There are machine, simulating a human actually typing the keyboard. There are a set of
a set of special keys available. If these are in your boot command, they special keys available. If these are in your boot command, they will be replaced
will be replaced by the proper key: by the proper key:
- `<bs>` - Backspace
- `<del>` - Delete
- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
- `<esc>` - Simulates pressing the escape key.
- `<tab>` - Simulates pressing the tab key.
- `<f1>` - `<f12>` - Simulates pressing a function key.
- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
- `<spacebar>` - Simulates pressing the spacebar.
- `<insert>` - Simulates pressing the insert key.
- `<home>` `<end>` - Simulates pressing the home and end keys.
- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before sending any additional keys. This is useful if you have to generally wait for the UI to update before typing more.
In addition to the special keys, each command to type is treated as a [configuration template](/docs/templates/configuration-templates.html). The available variables are:

- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server that is started serving the directory specified by the `http_directory` configuration parameter. If `http_directory` isn't specified, these will be blank!

Example boot command. This is actually a working boot command used to start an Ubuntu 12.04 installer:
```text
[
  "<esc><esc><enter><wait>",
  "/install/vmlinuz noapic ",
  ...
]
```
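The example above is abbreviated here. For reference, the `HTTPIP` and `HTTPPort` variables are usually interpolated straight into the kernel command line; the following sketch assumes a preseed file named `preseed.cfg` served from `http_directory`:

```text
[
  "<esc><esc><enter><wait>",
  "/install/vmlinuz noapic preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
  "<enter>"
]
```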
## Guest Additions

Packer will automatically download the proper guest additions for the version of VirtualBox that is running and upload those guest additions into the virtual machine so that provisioners can easily install them.

Packer downloads the guest additions from the official VirtualBox website, and verifies the file with the official checksums released by VirtualBox.

After the virtual machine is up and the operating system is installed, Packer uploads the guest additions into the virtual machine. The path where they are uploaded is controllable by `guest_additions_path`, and defaults to "VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the home directory of the SSH user.
## VBoxManage Commands

In order to perform extra customization of the virtual machine, a template can define extra calls to `VBoxManage` to perform. [VBoxManage](http://www.virtualbox.org/manual/ch08.html) is the command-line interface to VirtualBox where you can completely control VirtualBox. It can be used to do things such as set RAM, CPUs, etc.

Extra VBoxManage commands are defined in the template in the `vboxmanage` section. An example is shown below that sets the memory and number of CPUs within the virtual machine:
```javascript
{
  "vboxmanage": [
    ["modifyvm", "{{.Name}}", "--memory", "1024"],
    ["modifyvm", "{{.Name}}", "--cpus", "2"]
  ]
}
```
The value of `vboxmanage` is an array of commands to execute. These commands are executed in the order defined. So in the above example, the memory will be set followed by the CPUs.

Each command itself is an array of strings, where each string is an argument to `VBoxManage`. Each argument is treated as a [configuration template](/docs/templates/configuration-templates.html). The only available variable is `Name`, which is replaced with the unique name of the VM, which is required for many VBoxManage calls.
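The `vboxmanage_post` option documented above takes the same shape; a sketch that tags the machine with a description after it has been shut down (the description text is just an illustration) could look like:

```javascript
{
  "vboxmanage_post": [
    ["modifyvm", "{{.Name}}", "--description", "built by packer"]
  ]
}
```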
---
description: |
    This VirtualBox Packer builder is able to create VirtualBox virtual machines
    and export them in the OVF format, starting from an existing OVF/OVA (exported
    virtual machine image).
layout: docs
page_title: 'VirtualBox Builder (from an OVF/OVA)'
...
# VirtualBox Builder (from an OVF/OVA)

Type: `virtualbox-ovf`

This VirtualBox Packer builder is able to create [VirtualBox](https://www.virtualbox.org/) virtual machines and export them in the OVF format, starting from an existing OVF/OVA (exported virtual machine image).
When exporting from VirtualBox make sure to choose OVF Version 2, since Version 1 is not compatible and will generate errors like this:

```text
==> virtualbox-ovf: Progress state: VBOX_E_FILE_ERROR
==> virtualbox-ovf: VBoxManage: error: Appliance read failed
==> virtualbox-ovf: VBoxManage: error: Error reading "source.ova": element "Section" has no "type" attribute, line 21
==> virtualbox-ovf: VBoxManage: error: Details: code VBOX_E_FILE_ERROR (0x80bb0004), component Appliance, interface IAppliance
==> virtualbox-ovf: VBoxManage: error: Context: "int handleImportAppliance(HandlerArg*)" at line 304 of file VBoxManageAppliance.cpp
```
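If you export the source machine from the command line instead of the GUI, the OVF version can be chosen explicitly; a sketch, assuming a VM named `my-source-vm`, is:

```text
VBoxManage export my-source-vm -o source.ova --ovf20
```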
The builder builds a virtual machine by importing an existing OVF or OVA file. It then boots this image, runs provisioners on this new VM, and exports that VM to create the image. The imported machine is deleted prior to finishing the build.

## Basic Example

Here is a basic example. This example is functional if you have an OVF matching the settings here.
```javascript
{
  "type": "virtualbox-ovf",
  "source_path": "source.ovf",
  ...
}
```
It is important to add a `shutdown_command`. By default Packer halts the virtual machine and the file system may not be sync'd. Thus, changes made in a provisioner might not be saved.
## Configuration Reference

There are many configuration options available for the VirtualBox builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described.

In addition to the options listed here, a [communicator](/docs/templates/communicator.html) can be configured for this builder.
### Required:

- `source_path` (string) - The path to an OVF or OVA file that acts as the source of this build.

- `ssh_username` (string) - The username to use to SSH into the machine once the OS is installed.
### Optional:

- `boot_command` (array of strings) - This is an array of commands to type when the virtual machine is first booted. The goal of these commands should be to type just enough to initialize the operating system installer. Special keys can be typed as well, and are covered in the section below on the boot command. If this is not specified, it is assumed the installer will start itself.

- `boot_wait` (string) - The time to wait after booting the initial virtual machine before typing the `boot_command`. The value of this should be a duration. Examples are "5s" and "1m30s", which will cause Packer to wait five seconds and one minute 30 seconds, respectively. If this isn't specified, the default is 10 seconds.

- `export_opts` (array of strings) - Additional options to pass to `VBoxManage export`. This can be useful for passing product information to include in the resulting appliance file.

- `floppy_files` (array of strings) - A list of files to place onto a floppy disk that is attached when the VM is booted. This is most useful for unattended Windows installs, which look for an `Autounattend.xml` file on removable media. By default, no floppy will be attached. All files listed in this setting get placed into the root directory of the floppy and the floppy is attached as the first floppy device. Currently, no support exists for creating sub-directories on the floppy. Wildcard characters (*, ?, and []) are allowed. Directory names are also allowed, which will add all the files found in the directory to the floppy.

- `format` (string) - Either "ovf" or "ova", this specifies the output format of the exported virtual machine. This defaults to "ovf".

- `guest_additions_mode` (string) - The method by which guest additions are made available to the guest for installation. Valid options are "upload", "attach", or "disable". If the mode is "attach" the guest additions ISO will be attached as a CD device to the virtual machine. If the mode is "upload" the guest additions ISO will be uploaded to the path specified by `guest_additions_path`. The default value is "upload". If "disable" is used, guest additions won't be downloaded, either.

- `guest_additions_path` (string) - The path on the guest virtual machine where the VirtualBox guest additions ISO will be uploaded. By default this is "VBoxGuestAdditions.iso", which should upload into the login directory of the user. This is a [configuration template](/docs/templates/configuration-templates.html) where the `Version` variable is replaced with the VirtualBox version.

- `guest_additions_sha256` (string) - The SHA256 checksum of the guest additions ISO that will be uploaded to the guest VM. By default the checksums will be downloaded from the VirtualBox website, so this only needs to be set if you want to be explicit about the checksum.

- `guest_additions_url` (string) - The URL to the guest additions ISO to upload. This can also be a file URL if the ISO is at a local path. By default the VirtualBox builder will go and download the proper guest additions ISO from the internet.

- `headless` (boolean) - Packer defaults to building VirtualBox virtual machines by launching a GUI that shows the console of the machine being built. When this value is set to true, the machine will start without a console.

- `http_directory` (string) - Path to a directory to serve using an HTTP server. The files in this directory will be available over HTTP and requestable from the virtual machine. This is useful for hosting kickstart files and so on. By default this is "", which means no HTTP server will be started. The address and port of the HTTP server will be available as variables in `boot_command`. This is covered in more detail below.

- `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the `http_directory`. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to run the HTTP server. If you want to force the HTTP server to be on one port, make this minimum and maximum port the same. By default the values are 8000 and 9000, respectively.

- `import_flags` (array of strings) - Additional flags to pass to `VBoxManage import`. This can be used to add additional command-line flags such as `--eula-accept` to accept a EULA in the OVF. A sketch combining several of these optional settings follows this list.

- `import_opts` (string) - Additional options to pass to `VBoxManage import`. This can be useful for passing "keepallmacs" or "keepnatmacs" options for existing OVF images.

- `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer` is executed. This directory must not exist or be empty prior to running the builder. By default this is "output-BUILDNAME", where "BUILDNAME" is the name of the build.

- `shutdown_command` (string) - The command to use to gracefully shut down the machine once all the provisioning is done. By default this is an empty string, which tells Packer to just forcefully shut down the machine; it may safely be omitted if a shutdown command is run inside one of your provisioning scripts. If one or more scripts require a reboot, it is suggested to leave this blank (since reboots may fail) and to specify the final shutdown command in your last script.

- `shutdown_timeout` (string) - The amount of time to wait after executing the `shutdown_command` for the virtual machine to actually shut down. If it doesn't shut down in this time, it is an error. By default, the timeout is "5m", or five minutes.

- `ssh_host_port_min` and `ssh_host_port_max` (integer) - The minimum and maximum port to use for the SSH port on the host machine which is forwarded to the SSH port on the guest machine. Because Packer often runs in parallel, Packer will choose a randomly available port in this range to use as the host port.

- `ssh_skip_nat_mapping` (boolean) - Defaults to false. When enabled, Packer does not set up forwarded port mapping for SSH requests and uses `ssh_port` on the host to communicate to the virtual machine.

- `vboxmanage` (array of array of strings) - Custom `VBoxManage` commands to execute in order to further customize the virtual machine being created. The value of this is an array of commands to execute. The commands are executed in the order defined in the template. For each command, the command itself is defined as an array of strings, where each string represents a single argument on the command line to `VBoxManage` (but excluding `VBoxManage` itself). Each argument is treated as a [configuration template](/docs/templates/configuration-templates.html), where the `Name` variable is replaced with the VM name. More details on how to use `VBoxManage` are below.

- `vboxmanage_post` (array of array of strings) - Identical to `vboxmanage`, except that it is run after the virtual machine is shut down, and before the virtual machine is exported.

- `virtualbox_version_file` (string) - The path within the virtual machine to upload a file that contains the VirtualBox version that was used to create the machine. This information can be useful for provisioning. By default this is ".vbox_version", which will generally be uploaded into the home directory.

- `vm_name` (string) - This is the name of the virtual machine when it is imported as well as the name of the OVF file when the virtual machine is exported. By default this is "packer-BUILDNAME", where "BUILDNAME" is the name of the build.
## Guest Additions

Packer will automatically download the proper guest additions for the version of VirtualBox that is running and upload those guest additions into the virtual machine so that provisioners can easily install them.

Packer downloads the guest additions from the official VirtualBox website, and verifies the file with the official checksums released by VirtualBox.

After the virtual machine is up and the operating system is installed, Packer uploads the guest additions into the virtual machine. The path where they are uploaded is controllable by `guest_additions_path`, and defaults to "VBoxGuestAdditions.iso". Without an absolute path, it is uploaded to the home directory of the SSH user.
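If you need to control how the guest additions are delivered, the relevant options from the reference above combine roughly like this; the path is an assumption, and `{{.Version}}` is the template variable mentioned for `guest_additions_path`:

```javascript
{
  "guest_additions_mode": "upload",
  "guest_additions_path": "VBoxGuestAdditions_{{.Version}}.iso"
}
```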
## VBoxManage Commands

In order to perform extra customization of the virtual machine, a template can define extra calls to `VBoxManage` to perform. [VBoxManage](http://www.virtualbox.org/manual/ch08.html) is the command-line interface to VirtualBox where you can completely control VirtualBox. It can be used to do things such as set RAM, CPUs, etc.

Extra VBoxManage commands are defined in the template in the `vboxmanage` section. An example is shown below that sets the memory and number of CPUs within the virtual machine:
```javascript
{
  "vboxmanage": [
    ["modifyvm", "{{.Name}}", "--memory", "1024"],
    ["modifyvm", "{{.Name}}", "--cpus", "2"]
  ]
}
```
The value of `vboxmanage` is an array of commands to execute. These commands are executed in the order defined. So in the above example, the memory will be set followed by the CPUs.

Each command itself is an array of strings, where each string is an argument to `VBoxManage`. Each argument is treated as a [configuration template](/docs/templates/configuration-templates.html). The only available variable is `Name`, which is replaced with the unique name of the VM, which is required for many VBoxManage calls.
---
description: |
    The VirtualBox Packer builder is able to create VirtualBox virtual machines
    and export them in the OVA or OVF format.
layout: docs
page_title: VirtualBox Builder
...
# VirtualBox Builder

The VirtualBox Packer builder is able to create [VirtualBox](http://www.virtualbox.org) virtual machines and export them in the OVA or OVF format.

Packer actually comes with multiple builders able to create VirtualBox machines, depending on the strategy you want to use to build the image. Packer supports the following VirtualBox builders:
- [virtualbox-iso](/docs/builders/virtualbox-iso.html) - Starts from an ISO file, creates a brand new VirtualBox VM, installs an OS, provisions software within the OS, then exports that machine to create an image. This is best for people who want to start from scratch.

- [virtualbox-ovf](/docs/builders/virtualbox-ovf.html) - This builder imports an existing OVF/OVA file, runs provisioners on top of that VM, and exports that machine to create an image. This is best if you have an existing VirtualBox VM export you want to use as the source. As an additional benefit, you can feed the artifact of this builder back into itself to iterate on a machine.
---
description: |
    This VMware Packer builder is able to create VMware virtual machines from an
    ISO file as a source. It currently supports building virtual machines on hosts
    running VMware Fusion for OS X, VMware Workstation for Linux and Windows, and
    VMware Player on Linux. It can also build machines directly on VMware vSphere
    Hypervisor using SSH as opposed to the vSphere API.
layout: docs
page_title: VMware Builder from ISO
...
# VMware Builder (from ISO)

Type: `vmware-iso`

This VMware Packer builder is able to create VMware virtual machines from an ISO file as a source. It currently supports building virtual machines on hosts running [VMware Fusion](http://www.vmware.com/products/fusion/overview.html) for OS X, [VMware Workstation](http://www.vmware.com/products/workstation/overview.html) for Linux and Windows, and [VMware Player](http://www.vmware.com/products/player/) on Linux. It can also build machines directly on [VMware vSphere Hypervisor](http://www.vmware.com/products/vsphere-hypervisor/) using SSH as opposed to the vSphere API.

The builder builds a virtual machine by creating a new virtual machine from scratch, booting it, installing an OS, provisioning software within the OS, then shutting it down. The result of the VMware builder is a directory containing all the files necessary to run the virtual machine.
## Basic Example

Here is a basic example. This example is not functional. It will start the OS installer but then fail because we don't provide the preseed file for Ubuntu to self-install. Still, the example serves to show the basic configuration:
```javascript
{
  "type": "vmware-iso",
  "iso_url": "http://old-releases.ubuntu.com/releases/precise/ubuntu-12.04.2-server-amd64.iso",
  ...
}
```
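Because the snippet above is truncated here, a slightly fuller sketch of the same kind of template follows; the checksum, credentials, and shutdown command are assumptions for illustration, not values to copy:

```javascript
{
  "type": "vmware-iso",
  "iso_url": "http://old-releases.ubuntu.com/releases/precise/ubuntu-12.04.2-server-amd64.iso",
  "iso_checksum_type": "md5",
  "iso_checksum": "af5f788aee1b32c4b2634734309cc9e9",
  "ssh_username": "packer",
  "ssh_password": "packer",
  "shutdown_command": "shutdown -P now"
}
```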
## Configuration Reference

There are many configuration options available for the VMware builder. They are organized below into two categories: required and optional. Within each category, the available options are alphabetized and described.

In addition to the options listed here, a [communicator](/docs/templates/communicator.html) can be configured for this builder.
### Required:

- `iso_checksum` (string) - The checksum for the OS ISO file. Because ISO files are so large, this is required and Packer will verify it prior to booting a virtual machine with the ISO attached. The type of the checksum is specified with `iso_checksum_type`, documented below.

- `iso_checksum_type` (string) - The type of the checksum specified in `iso_checksum`. Valid values are "none", "md5", "sha1", "sha256", or "sha512" currently. While "none" will skip checksumming, this is not recommended since ISO files are generally large and corruption does happen from time to time.

- `iso_url` (string) - A URL to the ISO containing the installation image. This URL can be either an HTTP URL or a file URL (or path to a file). If this is an HTTP URL, Packer will download it and cache it between runs.

- `ssh_username` (string) - The username to use to SSH into the machine once the OS is installed.
### Optional: ### Optional:
* `disk_additional_size` (array of integers) - The size(s) of any additional - `disk_additional_size` (array of integers) - The size(s) of any additional
hard disks for the VM in megabytes. If this is not specified then the VM will hard disks for the VM in megabytes. If this is not specified then the VM
only contain a primary hard disk. The builder uses expandable, not fixed-size will only contain a primary hard disk. The builder uses expandable, not
virtual hard disks, so the actual file representing the disk will not use the fixed-size virtual hard disks, so the actual file representing the disk will
full size unless it is full. not use the full size unless it is full.
* `boot_command` (array of strings) - This is an array of commands to type - `boot_command` (array of strings) - This is an array of commands to type
when the virtual machine is first booted. The goal of these commands should when the virtual machine is first booted. The goal of these commands should
be to type just enough to initialize the operating system installer. Special be to type just enough to initialize the operating system installer. Special
keys can be typed as well, and are covered in the section below on the boot keys can be typed as well, and are covered in the section below on the
command. If this is not specified, it is assumed the installer will start boot command. If this is not specified, it is assumed the installer will
itself. start itself.
* `boot_wait` (string) - The time to wait after booting the initial virtual - `boot_wait` (string) - The time to wait after booting the initial virtual
machine before typing the `boot_command`. The value of this should be machine before typing the `boot_command`. The value of this should be
a duration. Examples are "5s" and "1m30s" which will cause Packer to wait a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
five seconds and one minute 30 seconds, respectively. If this isn't specified, five seconds and one minute 30 seconds, respectively. If this isn't
the default is 10 seconds. specified, the default is 10 seconds.
* `disk_size` (integer) - The size of the hard disk for the VM in megabytes. - `disk_size` (integer) - The size of the hard disk for the VM in megabytes.
The builder uses expandable, not fixed-size virtual hard disks, so the The builder uses expandable, not fixed-size virtual hard disks, so the
actual file representing the disk will not use the full size unless it is full. actual file representing the disk will not use the full size unless it
By default this is set to 40,000 (about 40 GB). is full. By default this is set to 40,000 (about 40 GB).
* `disk_type_id` (string) - The type of VMware virtual disk to create. - `disk_type_id` (string) - The type of VMware virtual disk to create. The
The default is "1", which corresponds to a growable virtual disk split in default is "1", which corresponds to a growable virtual disk split in
2GB files. This option is for advanced usage, modify only if you 2GB files. This option is for advanced usage, modify only if you know what
know what you're doing. For more information, please consult the you're doing. For more information, please consult the [Virtual Disk Manager
[Virtual Disk Manager User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) User's Guide](http://www.vmware.com/pdf/VirtualDiskManager.pdf) for desktop
for desktop VMware clients. For ESXi, refer to the proper ESXi documentation. VMware clients. For ESXi, refer to the proper ESXi documentation.
* `floppy_files` (array of strings) - A list of files to place onto a floppy - `floppy_files` (array of strings) - A list of files to place onto a floppy
disk that is attached when the VM is booted. This is most useful disk that is attached when the VM is booted. This is most useful for
for unattended Windows installs, which look for an `Autounattend.xml` file unattended Windows installs, which look for an `Autounattend.xml` file on
on removable media. By default, no floppy will be attached. All files removable media. By default, no floppy will be attached. All files listed in
listed in this setting get placed into the root directory of the floppy this setting get placed into the root directory of the floppy and the floppy
and the floppy is attached as the first floppy device. Currently, no is attached as the first floppy device. Currently, no support exists for
support exists for creating sub-directories on the floppy. Wildcard creating sub-directories on the floppy. Wildcard characters (\*, ?,
characters (*, ?, and []) are allowed. Directory names are also allowed, and \[\]) are allowed. Directory names are also allowed, which will add all
which will add all the files found in the directory to the floppy. the files found in the directory to the floppy.
* `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this - `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is
is "/Applications/VMware Fusion.app" but this setting allows you to "/Applications/VMware Fusion.app" but this setting allows you to
customize this. customize this.
* `guest_os_type` (string) - The guest OS type being installed. This will be - `guest_os_type` (string) - The guest OS type being installed. This will be
set in the VMware VMX. By default this is "other". By specifying a more specific set in the VMware VMX. By default this is "other". By specifying a more
OS type, VMware may perform some optimizations or virtual hardware changes specific OS type, VMware may perform some optimizations or virtual hardware
to better support the operating system running in the virtual machine. changes to better support the operating system running in the
virtual machine.
* `headless` (boolean) - Packer defaults to building VMware
virtual machines by launching a GUI that shows the console of the - `headless` (boolean) - Packer defaults to building VMware virtual machines
machine being built. When this value is set to true, the machine will by launching a GUI that shows the console of the machine being built. When
start without a console. For VMware machines, Packer will output VNC this value is set to true, the machine will start without a console. For
connection information in case you need to connect to the console to VMware machines, Packer will output VNC connection information in case you
debug the build process. need to connect to the console to debug the build process.
* `http_directory` (string) - Path to a directory to serve using an HTTP - `http_directory` (string) - Path to a directory to serve using an
server. The files in this directory will be available over HTTP that will HTTP server. The files in this directory will be available over HTTP that
be requestable from the virtual machine. This is useful for hosting will be requestable from the virtual machine. This is useful for hosting
kickstart files and so on. By default this is "", which means no HTTP kickstart files and so on. By default this is "", which means no HTTP server
server will be started. The address and port of the HTTP server will be will be started. The address and port of the HTTP server will be available
available as variables in `boot_command`. This is covered in more detail as variables in `boot_command`. This is covered in more detail below.
below.
- `http_port_min` and `http_port_max` (integer) - These are the minimum and
* `http_port_min` and `http_port_max` (integer) - These are the minimum and maximum port to use for the HTTP server started to serve the
maximum port to use for the HTTP server started to serve the `http_directory`. `http_directory`. Because Packer often runs in parallel, Packer will choose
Because Packer often runs in parallel, Packer will choose a randomly available a randomly available port in this range to run the HTTP server. If you want
port in this range to run the HTTP server. If you want to force the HTTP to force the HTTP server to be on one port, make this minimum and maximum
server to be on one port, make this minimum and maximum port the same. port the same. By default the values are 8000 and 9000, respectively.
By default the values are 8000 and 9000, respectively.
- `iso_urls` (array of strings) - Multiple URLs for the ISO to download.
* `iso_urls` (array of strings) - Multiple URLs for the ISO to download. Packer will try these in order. If anything goes wrong attempting to
Packer will try these in order. If anything goes wrong attempting to download download or while downloading a single URL, it will move on to the next. All
or while downloading a single URL, it will move on to the next. All URLs URLs must point to the same file (same checksum). By default this is empty
must point to the same file (same checksum). By default this is empty and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.
and `iso_url` is used. Only one of `iso_url` or `iso_urls` can be specified.
- `output_directory` (string) - This is the path to the directory where the
* `output_directory` (string) - This is the path to the directory where the resulting virtual machine will be created. This may be relative or absolute.
resulting virtual machine will be created. This may be relative or absolute. If relative, the path is relative to the working directory when `packer`
If relative, the path is relative to the working directory when `packer` is executed. This directory must not exist or be empty prior to running
is executed. This directory must not exist or be empty prior to running the builder. the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
By default this is "output-BUILDNAME" where "BUILDNAME" is the name name of the build.
of the build.
- `remote_cache_datastore` (string) - The path to the datastore where
* `remote_cache_datastore` (string) - The path to the datastore where supporting files will be stored during the build on the remote machine. By
supporting files will be stored during the build on the remote machine. default this is the same as the `remote_datastore` option. This only has an
By default this is the same as the `remote_datastore` option. This only effect if `remote_type` is enabled.
has an effect if `remote_type` is enabled.
- `remote_cache_directory` (string) - The path where the ISO and/or floppy
* `remote_cache_directory` (string) - The path where the ISO and/or floppy files will be stored during the build on the remote machine. The path is
files will be stored during the build on the remote machine. The path is relative to the `remote_cache_datastore` on the remote machine. By default
relative to the `remote_cache_datastore` on the remote machine. By default this is "packer\_cache". This only has an effect if `remote_type`
this is "packer_cache". This only has an effect if `remote_type` is enabled. is enabled.
* `remote_datastore` (string) - The path to the datastore where the resulting - `remote_datastore` (string) - The path to the datastore where the resulting
VM will be stored when it is built on the remote machine. By default this VM will be stored when it is built on the remote machine. By default this
is "datastore1". This only has an effect if `remote_type` is enabled. is "datastore1". This only has an effect if `remote_type` is enabled.
* `remote_host` (string) - The host of the remote machine used for access. - `remote_host` (string) - The host of the remote machine used for access.
This is only required if `remote_type` is enabled. This is only required if `remote_type` is enabled.
* `remote_password` (string) - The SSH password for the user used to - `remote_password` (string) - The SSH password for the user used to access
access the remote machine. By default this is empty. This only has an the remote machine. By default this is empty. This only has an effect if
effect if `remote_type` is enabled. `remote_type` is enabled.
* `remote_type` (string) - The type of remote machine that will be used to - `remote_type` (string) - The type of remote machine that will be used to
build this VM rather than a local desktop product. The only value accepted build this VM rather than a local desktop product. The only value accepted
for this currently is "esx5". If this is not set, a desktop product will be for this currently is "esx5". If this is not set, a desktop product will
used. By default, this is not set. be used. By default, this is not set.
* `remote_username` (string) - The username for the SSH user that will access - `remote_username` (string) - The username for the SSH user that will access
the remote machine. This is required if `remote_type` is enabled. the remote machine. This is required if `remote_type` is enabled.
* `shutdown_command` (string) - The command to use to gracefully shut down - `shutdown_command` (string) - The command to use to gracefully shut down the
the machine once all the provisioning is done. By default this is an empty machine once all the provisioning is done. By default this is an empty
string, which tells Packer to just forcefully shut down the machine. string, which tells Packer to just forcefully shut down the machine.
* `shutdown_timeout` (string) - The amount of time to wait after executing - `shutdown_timeout` (string) - The amount of time to wait after executing the
the `shutdown_command` for the virtual machine to actually shut down. `shutdown_command` for the virtual machine to actually shut down. If it
If it doesn't shut down in this time, it is an error. By default, the timeout doesn't shut down in this time, it is an error. By default, the timeout is
is "5m", or five minutes. "5m", or five minutes.
* `skip_compaction` (boolean) - VMware-created disks are defragmented - `skip_compaction` (boolean) - VMware-created disks are defragmented and
and compacted at the end of the build process using `vmware-vdiskmanager`. compacted at the end of the build process using `vmware-vdiskmanager`. In
In certain rare cases, this might actually end up making the resulting disks certain rare cases, this might actually end up making the resulting disks
slightly larger. If you find this to be the case, you can disable compaction slightly larger. If you find this to be the case, you can disable compaction
using this configuration value. using this configuration value.
* `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to - `tools_upload_flavor` (string) - The flavor of the VMware Tools ISO to
upload into the VM. Valid values are "darwin", "linux", and "windows". upload into the VM. Valid values are "darwin", "linux", and "windows". By
By default, this is empty, which means VMware tools won't be uploaded. default, this is empty, which means VMware tools won't be uploaded.
* `tools_upload_path` (string) - The path in the VM to upload the VMware - `tools_upload_path` (string) - The path in the VM to upload the
tools. This only takes effect if `tools_upload_flavor` is non-empty. VMware tools. This only takes effect if `tools_upload_flavor` is non-empty.
This is a [configuration template](/docs/templates/configuration-templates.html) This is a [configuration
that has a single valid variable: `Flavor`, which will be the value of template](/docs/templates/configuration-templates.html) that has a single
    valid variable: `Flavor`, which will be the value of `tools_upload_flavor`.
    By default the upload path is set to `{{.Flavor}}.iso`. This setting is not
    used when `remote_type` is "esx5".

- `version` (string) - The [vmx hardware
    version](http://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=1003746)
    for the new virtual machine. Only the default value has been tested, any
    other value is experimental. Default value is '9'.

- `vm_name` (string) - This is the name of the VMX file for the new virtual
    machine, without the file extension. By default this is "packer-BUILDNAME",
    where "BUILDNAME" is the name of the build.

- `vmdk_name` (string) - The filename of the virtual disk that'll be created,
    without the extension. This defaults to "packer".

- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter
    into the virtual machine VMX file. This is for advanced users who want to
    set properties such as memory, CPU, etc. (see the example after this list).

- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
    except that it is run after the virtual machine is shutdown, and before the
    virtual machine is exported.

- `vmx_template_path` (string) - Path to a [configuration
    template](/docs/templates/configuration-templates.html) that defines the
    contents of the virtual machine VMX file for VMware. This is for **advanced
    users only** as this can render the virtual machine non-functional. See
    below for more information. For basic VMX modifications, try
    `vmx_data` first.

- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port
    to use for VNC access to the virtual machine. The builder uses VNC to type
    the initial `boot_command`. Because Packer generally runs in parallel,
    Packer uses a randomly chosen port in this range that appears available. By
    default this is 5900 to 6000. The minimum and maximum ports are inclusive.
## Boot Command

The `boot_command` configuration is very important: it specifies the keys to
type when the virtual machine is first booted in order to start the OS
installer. This command is typed after `boot_wait`, which gives the virtual
machine some time to actually load the ISO.

As documented above, the `boot_command` is an array of strings. The strings are
all typed in sequence. It is an array only to improve readability within the
template.

The boot command is "typed" character for character over a VNC connection to the
machine, simulating a human actually typing at the keyboard. There is a set of
special keys available. If these are in your boot command, they will be replaced
by the proper key:

- `<bs>` - Backspace
- `<del>` - Delete
- `<enter>` and `<return>` - Simulates an actual "enter" or "return" keypress.
- `<esc>` - Simulates pressing the escape key.
- `<tab>` - Simulates pressing the tab key.
- `<f1>` - `<f12>` - Simulates pressing a function key.
- `<up>` `<down>` `<left>` `<right>` - Simulates pressing an arrow key.
- `<spacebar>` - Simulates pressing the spacebar.
- `<insert>` - Simulates pressing the insert key.
- `<home>` `<end>` - Simulates pressing the home and end keys.
- `<pageUp>` `<pageDown>` - Simulates pressing the page up and page down keys.
- `<wait>` `<wait5>` `<wait10>` - Adds a 1, 5 or 10 second pause before
    sending any additional keys. This is useful if you generally have to wait
    for the UI to update before typing more.

In addition to the special keys, each command to type is treated as a
[configuration template](/docs/templates/configuration-templates.html). The
available variables are:

- `HTTPIP` and `HTTPPort` - The IP and port, respectively, of an HTTP server
    that is started serving the directory specified by the `http_directory`
    configuration parameter. If `http_directory` isn't specified, these will be
    blank!
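These variables are typically interpolated straight into the boot command, most
commonly to hand the installer a kickstart or preseed URL. A minimal,
hypothetical sketch (the preseed file name and kernel arguments are
illustrative, not taken from this page):

``` {.javascript}
{
  "http_directory": "http",
  "boot_command": [
    "<esc><wait>",
    "install preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg<enter>"
  ]
}
```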
Example boot command. This is actually a working boot command used to start an
Ubuntu 12.04 installer:

``` {.text}
[
  "<esc><esc><enter><wait>",
  "/install/vmlinuz noapic ",
  ...
]
```
## VMX Template

The heart of a VMware machine is the "vmx" file. This contains all the virtual
hardware metadata necessary for the VM to function. Packer by default uses a
[safe, flexible VMX
file](https://github.com/mitchellh/packer/blob/20541a7eda085aa5cf35bfed5069592ca49d106e/builder/vmware/step_create_vmx.go#L84).
But for advanced users, this template can be customized. This allows Packer to
build virtual machines of effectively any guest operating system type.

~> **This is an advanced feature.** Modifying the VMX template can easily
cause your virtual machine to not boot properly. Please only modify the template
if you know what you're doing.

Within the template, a handful of variables are available so that your template
can continue working with the rest of the Packer machinery. Using these
variables isn't required, however.

- `Name` - The name of the virtual machine.
- `GuestOS` - The VMware-valid guest OS type.
- `DiskName` - The filename (without the suffix) of the main virtual disk.
- `ISOPath` - The path to the ISO to use for the OS installation.
- `Version` - The hardware version VMware will execute this VM under. Also
    known as `virtualhw.version`.
## Building on a Remote vSphere Hypervisor

In addition to using the desktop products of VMware locally to build virtual
machines, Packer can use a remote VMware Hypervisor to build the virtual
machine.

-> **Note:** Packer supports ESXi 5.1 and above.

Before using a remote vSphere Hypervisor, you need to enable GuestIPHack by
running the following command:

``` {.text}
esxcli system settings advanced set -o /Net/GuestIPHack -i 1
```

When using a remote VMware Hypervisor, the builder still downloads the ISO and
various files locally, and uploads these to the remote machine. Packer currently
uses SSH to communicate to the ESXi machine rather than the vSphere API. At some
point, the vSphere API may be used.

Packer also requires VNC to issue boot commands during a build, which may be
disabled on some remote VMware Hypervisors. Please consult the appropriate
documentation on how to update VMware Hypervisor's firewall to allow these
connections.

To use a remote VMware vSphere Hypervisor to build your virtual machine, fill in
the required `remote_*` configurations:

- `remote_type` - This must be set to "esx5".
- `remote_host` - The host of the remote machine.
Additionally, there are some optional configurations that you'll likely have to
modify as well; a combined example follows this list:

- `remote_datastore` - The path to the datastore where the VM will be stored
    on the ESXi machine.

- `remote_cache_datastore` - The path to the datastore where supporting files
    will be stored during the build on the remote machine.

- `remote_cache_directory` - The path where the ISO and/or floppy files will
    be stored during the build on the remote machine. The path is relative to
    the `remote_cache_datastore` on the remote machine.

- `remote_username` - The SSH username used to access the remote machine.

- `remote_password` - The SSH password for access to the remote machine.
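Putting the required and optional settings together, here is a hedged sketch of
what the `remote_*` portion of a `vmware-iso` builder might look like. The host
name, datastore names, and credentials are placeholders; in practice you would
likely supply the password through a variable rather than inline:

``` {.javascript}
{
  "type": "vmware-iso",
  "remote_type": "esx5",
  "remote_host": "esxi.example.com",
  "remote_datastore": "datastore1",
  "remote_cache_datastore": "datastore1",
  "remote_cache_directory": "packer_cache",
  "remote_username": "root",
  "remote_password": "supersecret"
}
```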
---
description: |
    This VMware Packer builder is able to create VMware virtual machines from an
    existing VMware virtual machine (a VMX file). It currently supports building
    virtual machines on hosts running VMware Fusion Professional for OS X, VMware
    Workstation for Linux and Windows, and VMware Player on Linux.
layout: docs
page_title: VMware Builder from VMX
...
# VMware Builder (from VMX)

Type: `vmware-vmx`

This VMware Packer builder is able to create VMware virtual machines from an
existing VMware virtual machine (a VMX file). It currently supports building
virtual machines on hosts running [VMware Fusion
Professional](http://www.vmware.com/products/fusion-professional/) for OS X,
[VMware Workstation](http://www.vmware.com/products/workstation/overview.html)
for Linux and Windows, and [VMware
Player](http://www.vmware.com/products/player/) on Linux.

The builder builds a virtual machine by cloning the VMX file using the clone
capabilities introduced in VMware Fusion Professional 6, Workstation 10, and
Player 6. After cloning the VM, it provisions software within the new machine,
shuts it down, and compacts the disks. The resulting folder contains a new
VMware virtual machine.

## Basic Example

Here is an example. This example is fully functional as long as the source path
points to a real VMX file with the proper settings:
``` {.javascript}
{
  "type": "vmware-vmx",
  "source_path": "/path/to/a/vm.vmx",
  ...
}
```
## Configuration Reference

There are many configuration options available for the VMware builder. They are
organized below into two categories: required and optional. Within each
category, the available options are alphabetized and described.

In addition to the options listed here, a
[communicator](/docs/templates/communicator.html) can be configured for this
builder.

### Required:

- `source_path` (string) - Path to the source VMX file to clone.

- `ssh_username` (string) - The username to use to SSH into the machine once
    the OS is installed.
### Optional:

- `boot_command` (array of strings) - This is an array of commands to type
    when the virtual machine is first booted. The goal of these commands should
    be to type just enough to initialize the operating system installer. Special
    keys can be typed as well, and are covered in the section below on the
    boot command. If this is not specified, it is assumed the installer will
    start itself.

- `boot_wait` (string) - The time to wait after booting the initial virtual
    machine before typing the `boot_command`. The value of this should be
    a duration. Examples are "5s" and "1m30s" which will cause Packer to wait
    five seconds and one minute 30 seconds, respectively. If this isn't
    specified, the default is 10 seconds.

- `floppy_files` (array of strings) - A list of files to place onto a floppy
    disk that is attached when the VM is booted. This is most useful for
    unattended Windows installs, which look for an `Autounattend.xml` file on
    removable media. By default, no floppy will be attached. All files listed in
    this setting get placed into the root directory of the floppy and the floppy
    is attached as the first floppy device. Currently, no support exists for
    creating sub-directories on the floppy. Wildcard characters (*, ?,
    and []) are allowed. Directory names are also allowed, which will add all
    the files found in the directory to the floppy.

- `fusion_app_path` (string) - Path to "VMware Fusion.app". By default this is
    "/Applications/VMware Fusion.app" but this setting allows you to
    customize this.

- `headless` (boolean) - Packer defaults to building VMware virtual machines
    by launching a GUI that shows the console of the machine being built. When
    this value is set to true, the machine will start without a console. For
    VMware machines, Packer will output VNC connection information in case you
    need to connect to the console to debug the build process.

- `http_directory` (string) - Path to a directory to serve using an
    HTTP server. The files in this directory will be available over HTTP and
    requestable from the virtual machine. This is useful for hosting kickstart
    files and so on. By default this is "", which means no HTTP server will be
    started. The address and port of the HTTP server will be available as
    variables in `boot_command`. This is covered in more detail below.

- `http_port_min` and `http_port_max` (integer) - These are the minimum and
    maximum port to use for the HTTP server started to serve the
    `http_directory`. Because Packer often runs in parallel, Packer will choose
    a randomly available port in this range to run the HTTP server. If you want
    to force the HTTP server to be on one port, make this minimum and maximum
    port the same. By default the values are 8000 and 9000, respectively.

- `output_directory` (string) - This is the path to the directory where the
    resulting virtual machine will be created. This may be relative or absolute.
    If relative, the path is relative to the working directory when `packer`
    is executed. This directory must not exist or be empty prior to running
    the builder. By default this is "output-BUILDNAME" where "BUILDNAME" is the
    name of the build.

- `shutdown_command` (string) - The command to use to gracefully shut down the
    machine once all the provisioning is done. By default this is an empty
    string, which tells Packer to just forcefully shut down the machine. If a
    shutdown command already runs inside one of your scripts, this may safely be
    omitted. If one or more scripts require a reboot, it is suggested to leave
    this blank (since reboots may fail) and to specify the final shutdown
    command in your last script. See the example after this list.

- `shutdown_timeout` (string) - The amount of time to wait after executing the
    `shutdown_command` for the virtual machine to actually shut down. If it
    doesn't shut down in this time, it is an error. By default, the timeout is
    "5m", or five minutes.

- `skip_compaction` (boolean) - VMware-created disks are defragmented and
    compacted at the end of the build process using `vmware-vdiskmanager`. In
    certain rare cases, this might actually end up making the resulting disks
    slightly larger. If you find this to be the case, you can disable compaction
    using this configuration value.

- `vm_name` (string) - This is the name of the VMX file for the new virtual
    machine, without the file extension. By default this is "packer-BUILDNAME",
    where "BUILDNAME" is the name of the build.

- `vmx_data` (object of key/value strings) - Arbitrary key/values to enter
    into the virtual machine VMX file. This is for advanced users who want to
    set properties such as memory, CPU, etc.

- `vmx_data_post` (object of key/value strings) - Identical to `vmx_data`,
    except that it is run after the virtual machine is shutdown, and before the
    virtual machine is exported.

- `vnc_port_min` and `vnc_port_max` (integer) - The minimum and maximum port
    to use for VNC access to the virtual machine. The builder uses VNC to type
    the initial `boot_command`. Because Packer generally runs in parallel,
    Packer uses a randomly chosen port in this range that appears available. By
    default this is 5900 to 6000. The minimum and maximum ports are inclusive.
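To show a few of these optional settings in context, here is a hedged sketch of
a `vmware-vmx` builder stanza. The SSH username, wait time, and shutdown command
are illustrative assumptions rather than recommended values:

``` {.javascript}
{
  "type": "vmware-vmx",
  "source_path": "/path/to/a/vm.vmx",
  "ssh_username": "packer",
  "boot_wait": "30s",
  "headless": true,
  "shutdown_command": "sudo shutdown -P now",
  "output_directory": "output-vmware-vmx"
}
```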
---
description: |
    The VMware Packer builder is able to create VMware virtual machines for use
    with any VMware product.
layout: docs
page_title: VMware Builder
...
# VMware Builder

The VMware Packer builder is able to create VMware virtual machines for use with
any VMware product.

Packer actually comes with multiple builders able to create VMware machines,
depending on the strategy you want to use to build the image. Packer supports
the following VMware builders:

- [vmware-iso](/docs/builders/vmware-iso.html) - Starts from an ISO file,
    creates a brand new VMware VM, installs an OS, provisions software within
    the OS, then exports that machine to create an image. This is best for
    people who want to start from scratch.

- [vmware-vmx](/docs/builders/vmware-vmx.html) - This builder imports an
    existing VMware machine (from a VMX file), runs provisioners on top of that
    VM, and exports that machine to create an image. This is best if you have an
    existing VMware VM you want to use as the source. As an additional benefit,
    you can feed the artifact of this builder back into Packer to iterate on
    a machine.
---
description: |
    The `packer build` Packer command takes a template and runs all the builds
    within it in order to generate a set of artifacts. The various builds
    specified within a template are executed in parallel, unless otherwise
    specified, and the artifacts that are created will be output at the end of
    the build.
layout: docs
page_title: 'Build - Command-Line'
...
# Command-Line: Build

The `packer build` Packer command takes a template and runs all the builds
within it in order to generate a set of artifacts. The various builds specified
within a template are executed in parallel, unless otherwise specified, and the
artifacts that are created will be output at the end of the build.

## Options

- `-color=false` - Disables colorized output. Enabled by default.

- `-debug` - Disables parallelization and enables debug mode. Debug mode flags
    the builders that they should output debugging information. The exact
    behavior of debug mode is left to the builder. In general, builders usually
    will stop between each step, waiting for keyboard input before continuing.
    This will allow the user to inspect state and so on.

- `-except=foo,bar,baz` - Builds all the builds except those with the given
    comma-separated names. Build names by default are the names of their
    builders, unless a specific `name` attribute is specified within
    the configuration.

- `-force` - Forces a builder to run when artifacts from a previous build
    prevent a build from running. The exact behavior of a forced build is left
    to the builder. In general, a builder supporting the forced build will
    remove the artifacts from the previous build. This will allow the user to
    repeat a build without having to manually clean these artifacts beforehand.

- `-only=foo,bar,baz` - Only build the builds with the given comma-separated
    names. Build names by default are the names of their builders, unless a
    specific `name` attribute is specified within the configuration.
---
description: |
    The `packer fix` Packer command takes a template and finds backwards
    incompatible parts of it and brings it up to date so it can be used with the
    latest version of Packer. After you update to a new Packer release, you
    should run the fix command to make sure your templates work with the
    new release.
layout: docs
page_title: 'Fix - Command-Line'
...
# Command-Line: Fix

The `packer fix` Packer command takes a template and finds backwards
incompatible parts of it and brings it up to date so it can be used with the
latest version of Packer. After you update to a new Packer release, you should
run the fix command to make sure your templates work with the new release.
The fix command will output the changed template to standard out, so you should
redirect standard output using standard OS-specific techniques if you want to
save it to a file. For example, on Linux systems, you may want to do this:

``` {.text}
$ packer fix old.json > new.json
```
If fixing fails for any reason, the fix command will exit with a non-zero exit
status. Error messages appear on standard error, so if you're redirecting
output, you'll still see error messages.

-> **Even when Packer fix doesn't do anything** to the template, the template
will be output to standard out. Things such as configuration key ordering and
indentation may be changed. The output format, however, is pretty-printed for
human readability.

The full list of fixes that the fix command performs is visible in the help
output, which can be seen via `packer fix -h`.
---
description: |
    The `packer inspect` Packer command takes a template and outputs the various
    components a template defines. This can help you quickly learn about a
    template without having to dive into the JSON itself. The command will tell
    you things like what variables a template accepts, the builders it defines,
    the provisioners it defines and the order they'll run, and more.
layout: docs
page_title: 'Inspect - Command-Line'
...
# Command-Line: Inspect

The `packer inspect` Packer command takes a template and outputs the various
components a template defines. This can help you quickly learn about a template
without having to dive into the JSON itself. The command will tell you things
like what variables a template accepts, the builders it defines, the
provisioners it defines and the order they'll run, and more.

This command is extra useful when used with [machine-readable
output](/docs/command-line/machine-readable.html) enabled. The command outputs
the components in a way that is parseable by machines.

The command doesn't validate the actual configuration of the various components
(that is what the `validate` command is for), but it will validate the syntax of
your template by necessity.

## Usage Example

Given a basic template, here is an example of what the output might look like:

``` {.text}
$ packer inspect template.json
Variables and their defaults:
...
```
---
description: |
    Packer is controlled using a command-line interface. All interaction with
    Packer is done via the `packer` tool. Like many other command-line tools, the
    `packer` tool takes a subcommand to execute, and that subcommand may have
    additional options as well. Subcommands are executed with
    `packer SUBCOMMAND`, where "SUBCOMMAND" is obviously the actual command you
    wish to execute.
layout: docs
page_title: 'Packer Command-Line'
...
# Packer Command-Line

Packer is controlled using a command-line interface. All interaction with Packer
is done via the `packer` tool. Like many other command-line tools, the `packer`
tool takes a subcommand to execute, and that subcommand may have additional
options as well. Subcommands are executed with `packer SUBCOMMAND`, where
"SUBCOMMAND" is obviously the actual command you wish to execute.

If you run `packer` by itself, help will be displayed showing all available
subcommands and a brief synopsis of what they do. In addition to this, you can
run any `packer` command with the `-h` flag to output more detailed help for a
specific subcommand.

In addition to the documentation available on the command-line, each command is
documented on this website. You can find the documentation for a specific
subcommand using the navigation to the left.
---
description: |
    By default, the output of Packer is very human-readable. It uses nice
    formatting, spacing, and colors in order to make Packer a pleasure to use.
    However, Packer was built with automation in mind. To that end, Packer
    supports a fully machine-readable output setting, allowing you to use Packer
    in automated environments.
layout: docs
page_title: 'Machine-Readable Output - Command-Line'
...
# Machine-Readable Output

By default, the output of Packer is very human-readable. It uses nice
formatting, spacing, and colors in order to make Packer a pleasure to use.
However, Packer was built with automation in mind. To that end, Packer supports
a fully machine-readable output setting, allowing you to use Packer in automated
environments.

The machine-readable output format is easy to use and read and was made with
Unix tools in mind, so it is awk/sed/grep/etc. friendly.

## Enabling

The machine-readable output format can be enabled by passing the
`-machine-readable` flag to any Packer command. This immediately enables all
output to become machine-readable on stdout. Logging, if enabled, continues to
appear on stderr. An example of the output is shown below:

``` {.text}
$ packer -machine-readable version
1376289459,,version,0.2.4
1376289459,,version-prerelease,
1376289459,,ui,say,Packer v0.2.4.dev (eed6ece+CHANGES)
```

The format will be covered in more detail later. But as you can see, the output
immediately becomes machine-friendly. Try some other commands with the
`-machine-readable` flag to see!
## Format

The machine-readable format is a line-oriented, comma-delimited text format.
This makes it extremely easy to parse using standard Unix tools such as awk or
grep in addition to full programming languages like Ruby or Python.

The format is:

``` {.text}
timestamp,target,type,data...
```

Each component is explained below:

- **timestamp** is a Unix timestamp in UTC of when the message was printed.

- **target** is the target of the following output. This is empty if the
    message is related to Packer globally. Otherwise, this is generally a build
    name so you can relate output to a specific build while parallel builds
    are running.

- **type** is the type of machine-readable message being output. There is
    a set of standard types which are covered later, but each component of
    Packer (builders, provisioners, etc.) may output their own custom types as
    well, allowing the machine-readable output to be infinitely flexible.

- **data** is zero or more comma-separated values associated with the
    prior type. The exact amount and meaning of this data is type-dependent, so
    you must read the documentation associated with the type to
    understand fully.

Within the format, if data contains a comma, it is replaced with
`%!(PACKER_COMMA)`. This was preferred over an escape character such as `\'`
because it is more friendly to tools like awk.

Newlines within the format are replaced with their respective standard escape
sequence. Newlines become a literal `\n` within the output. Carriage returns
become a literal `\r`.

## Message Types

The set of machine-readable message types can be found in the [machine-readable
format](/docs/machine-readable/index.html) complete documentation section. This
section contains documentation on all the message types exposed by Packer core
as well as all the components that ship with Packer by default.
---
description: |
    The `packer push` Packer command takes a template and pushes it to a build
    service that will automatically build this Packer template.
layout: docs
page_title: 'Push - Command-Line'
...
# Command-Line: Push

External build services such as HashiCorp's Atlas make it easy to iterate on
Packer templates, especially when the builder you are running may not be easily
accessible (such as developing `qemu` builders on Mac or Windows).

!> The Packer build service will receive the raw copy of your Packer template
when you push. **If you have sensitive data in your Packer template, you should
move that data into Packer variables or environment variables!**

For the `push` command to work, the [push
configuration](/docs/templates/push.html) must be completed within the template.

## Options

- `-message` - A message to identify the purpose or changes in this Packer
    template much like a VCS commit message. This message will be passed to the
    Packer build service. This option is also available as a short option `-m`.

- `-token` - An access token for authenticating the push to the Packer build
    service such as Atlas. This can also be specified within the push
    configuration in the template.

- `-name` - The name of the build in the service. This typically looks like
    `hashicorp/precise64`.

## Examples

Push a Packer template:

``` {.shell}
$ packer push -m "Updating the apache version" template.json
```

Push a Packer template with a custom token:

``` {.shell}
$ packer push -token ABCD1234 template.json
```
---
description: |
    The `packer validate` Packer command is used to validate the syntax and
    configuration of a template. The command will return a zero exit status on
    success, and a non-zero exit status on failure. Additionally, if a template
    doesn't validate, any error messages will be output.
layout: docs
page_title: 'Validate - Command-Line'
...
# Command-Line: Validate

The `packer validate` Packer command is used to validate the syntax and
configuration of a [template](/docs/templates/introduction.html). The command
will return a zero exit status on success, and a non-zero exit status on
failure. Additionally, if a template doesn't validate, any error messages will
be output.

Example usage:

``` {.text}
$ packer validate my-template.json
Template validation failed. Errors are shown below.

Errors validating build 'vmware'. 1 error(s) occurred:
...
```
## Options

- `-syntax-only` - Only the syntax of the template is checked. The
    configuration is not validated.
---
description: |
    Packer Builders are the components of Packer responsible for creating a
    machine, bringing it to a point where it can be provisioned, and then turning
    that provisioned machine into some sort of machine image. Several builders
    are officially distributed with Packer itself, such as the AMI builder, the
    VMware builder, etc. However, it is possible to write custom builders using
    the Packer plugin interface, and this page documents how to do that.
layout: docs
page_title: 'Custom Builder - Extend Packer'
...
# Custom Builder Development

Packer Builders are the components of Packer responsible for creating a machine,
bringing it to a point where it can be provisioned, and then turning that
provisioned machine into some sort of machine image. Several builders are
officially distributed with Packer itself, such as the AMI builder, the VMware
builder, etc. However, it is possible to write custom builders using the Packer
plugin interface, and this page documents how to do that.

Prior to reading this page, it is assumed you have read the page on [plugin
development basics](/docs/extend/developing-plugins.html).

~> **Warning!** This is an advanced topic. If you're new to Packer, we
recommend getting a bit more comfortable before you dive into writing plugins.

## The Interface

The interface that must be implemented for a builder is the `packer.Builder`
interface. It is reproduced below for easy reference. The actual interface in
the source code contains some basic documentation as well explaining what each
method should do.

``` {.go}
type Builder interface {
    Prepare(...interface{}) error
    Run(ui Ui, hook Hook, cache Cache) (Artifact, error)
    Cancel()
}
```
### The "Prepare" Method

The `Prepare` method for each builder is called prior to any runs with the
configuration that was given in the template. This is passed in as an array of
`interface{}` types, but is generally `map[string]interface{}`. The prepare
method is responsible for translating this configuration into an internal
structure, validating it, and returning any errors.

For multiple parameters, they should be merged together into the final
configuration, with later parameters overwriting any previous configuration. The
exact semantics of the merge are left to the builder author.

For decoding the `interface{}` into a meaningful structure, the
[mapstructure](https://github.com/mitchellh/mapstructure) library is
recommended. Mapstructure will take an `interface{}` and decode it into an
arbitrarily complex struct. If there are any errors, it generates very
human-friendly errors that can be returned directly from the prepare method.
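As a rough illustration of the pattern described above, here is a minimal,
hypothetical sketch of a config struct and a `Prepare` implementation that
decodes and validates it with mapstructure. The package name, field names, and
validation rule are assumptions made for the example, not part of Packer
itself, and a real builder would also implement `Run` and `Cancel`:

``` {.go}
package mybuilder

import (
    "fmt"

    "github.com/mitchellh/mapstructure"
)

// config holds the subset of template settings this sketch cares about.
type config struct {
    OutputDir string `mapstructure:"output_directory"`
}

type Builder struct {
    config config
}

// Prepare merges each raw configuration map into the config struct; later
// parameters overwrite earlier ones, then the result is validated.
func (b *Builder) Prepare(raws ...interface{}) error {
    for _, raw := range raws {
        if err := mapstructure.Decode(raw, &b.config); err != nil {
            return err
        }
    }

    if b.config.OutputDir == "" {
        return fmt.Errorf("output_directory must be specified")
    }
    return nil
}
```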
While it is not actively enforced, **no side effects** should occur from running
the `Prepare` method. Specifically, don't create files, don't launch virtual
machines, etc. Prepare's purpose is solely to configure the builder and validate
the configuration.

In addition to normal configuration, Packer will inject a
`map[string]interface{}` with a key of `packer.DebugConfigKey` set to boolean
`true` if debug mode is enabled for the build. If this is set to true, then the
builder should enable a debug mode which assists builder developers and advanced
users to introspect what is going on during a build. During debug builds,
parallelism is strictly disabled, so it is safe to request input from stdin and
so on.
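One hedged way to check for this key inside `Prepare`, written as a small
helper (the helper itself is illustrative and assumes the core `packer`
interfaces package is imported):

```go
// debugEnabled reports whether Packer injected packer.DebugConfigKey with a
// true value into any of the raw configuration parameters passed to Prepare.
func debugEnabled(raws ...interface{}) bool {
    for _, raw := range raws {
        m, ok := raw.(map[string]interface{})
        if !ok {
            continue
        }
        if debug, ok := m[packer.DebugConfigKey].(bool); ok && debug {
            return true
        }
    }
    return false
}
```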
### The "Run" Method

`Run` is where all the interesting stuff happens. Run is executed, often in
parallel for multiple builders, to actually build the machine, provision it, and
create the resulting machine image, which is returned as an implementation of
the `packer.Artifact` interface.

The `Run` method takes three parameters. These are all very useful. The
`packer.Ui` object is used to send output to the console. `packer.Hook` is used
to execute hooks, which are covered in more detail in the hook section below.
And `packer.Cache` is used to store files between multiple Packer runs, and is
covered in more detail in the cache section below.

Because builder runs are typically a complex set of many steps, the
[multistep](https://github.com/mitchellh/multistep) library is recommended to
bring order to the complexity. Multistep is a library which allows you to
separate your logic into multiple distinct "steps" and string them together. It
fully supports cancellation mid-step and so on. Please check it out; it is how
the built-in builders are all implemented.
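As a hedged sketch of what a multistep-based `Run` can look like (the step
types are illustrative placeholders, a `runner` field is assumed on the builder
struct, and the multistep and packer packages are assumed to be imported):

```go
// Run builds the machine using a sequence of multistep steps.
func (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {
    // Shared state that steps read from and write to.
    state := new(multistep.BasicStateBag)
    state.Put("ui", ui)
    state.Put("hook", hook)
    state.Put("cache", cache)

    steps := []multistep.Step{
        // &StepCreateInstance{}, // illustrative placeholder steps
        // &StepProvision{},
    }

    b.runner = &multistep.BasicRunner{Steps: steps}
    b.runner.Run(state)

    // If any step stored an error in the state bag, surface it here.
    if rawErr, ok := state.GetOk("error"); ok {
        return nil, rawErr.(error)
    }

    // Return an artifact describing what was built (see "Creating an Artifact").
    return &Artifact{}, nil
}
```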
Finally, as a result of `Run`, an implementation of `packer.Artifact` should be
returned. More details on creating a `packer.Artifact` are covered in the
artifact section below. If something goes wrong during the build, an error can
be returned, as well. Note that it is perfectly fine to produce no artifact and
no error, although this is rare.

### The "Cancel" Method

The `Run` method is often run in parallel. The `Cancel` method can be called at
any time and requests cancellation of any builder run in progress. This method
should block until the run actually stops.

Cancels are most commonly triggered by external interrupts, such as the user
pressing `Ctrl-C`. Packer will only exit once all the builders clean up, so it
is important that you architect your builder in a way that it is quick to
respond to these cancellations and clean up after itself.
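A hedged sketch of `Cancel` that pairs with the multistep-based `Run` above; it
assumes the same `runner` field and mirrors the pattern used by the built-in
builders, with the standard `log` package imported:

```go
// Cancel asks the step runner to stop. The multistep runner takes care of
// stopping the in-flight step and running any cleanup.
func (b *Builder) Cancel() {
    if b.runner != nil {
        log.Println("Cancelling the step runner...")
        b.runner.Cancel()
    }
}
```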
## Creating an Artifact

The `Run` method is expected to return an implementation of the
`packer.Artifact` interface. Each builder must create its own implementation.
The interface is very simple and the documentation on the interface is quite
clear.

The only part of an artifact that may be confusing is the `BuilderId` method.
This method must return an absolutely unique ID for the builder. In general, I
follow the practice of making the ID contain my GitHub username and then the
platform it is building for. For example, the builder ID of the VMware builder
is "mitchellh.vmware" or something similar.

Post-processors use the builder ID value in order to make some assumptions about
the artifact results, so it is important it never changes.

Other than the builder ID, the rest should be self-explanatory by reading the
[packer.Artifact interface documentation](#).
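To make this concrete, here is a hedged sketch of an artifact for a
hypothetical cloud builder; the type, its fields, and the builder ID are
illustrative, and the exact `packer.Artifact` method set should be checked
against the interface in your Packer version:

```go
package customcloud

import "fmt"

// Artifact describes the image produced by the hypothetical builder.
type Artifact struct {
    imageID string
}

// BuilderId must be absolutely unique to this builder and must never change.
func (a *Artifact) BuilderId() string { return "yourname.custom-cloud" }

// Files returns the local files that make up the artifact; a remote cloud
// image has none.
func (a *Artifact) Files() []string { return nil }

func (a *Artifact) Id() string { return a.imageID }

func (a *Artifact) String() string {
    return fmt.Sprintf("A machine image was created: %s", a.imageID)
}

// Destroy would delete the remote image; this sketch does nothing.
func (a *Artifact) Destroy() error { return nil }
```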
## Provisioning

Packer has built-in support for provisioning, but the moment when provisioning
runs must be invoked by the builder itself, since only the builder knows when
the machine is running and ready for communication.

When the machine is ready to be provisioned, run the `packer.HookProvision`
hook, making sure the communicator is not nil, since this is required for
provisioners. An example of calling the hook is shown below:

```go
hook.Run(packer.HookProvision, ui, comm, nil)
```

At this point, Packer will run the provisioners and no additional work is
necessary.

-> **Note:** Hooks are still undergoing thought around their general design
and will likely change in a future version. They aren't fully "baked" yet, so
they aren't documented here other than to tell you how to hook in provisioners.
## Caching Files

It is common for some builders to deal with very large files, or files that take
a long time to generate. For example, the VMware builder has the capability to
download the operating system ISO from the internet. This is a time-consuming
process, so it would be convenient to cache the file. This sort of caching is a
core part of Packer that is exposed to builders.

The cache interface is `packer.Cache`. It behaves much like a Go
[RWMutex](http://golang.org/pkg/sync/#RWMutex). The builder requests a "lock" on
certain cache keys, and is given exclusive access to that key for the duration
of the lock. This locking mechanism allows multiple builders to share cache data
even though they're running in parallel.

For example, both the VMware and VirtualBox builders support downloading an
operating system ISO from the internet. Most of the time, this ISO is identical.
The locking mechanisms of the cache allow one of the builders to download it
only once, but allow both builders to share the downloaded file.

The [documentation for packer.Cache](#) is very detailed in how it works.
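A hedged sketch of the locking pattern from inside `Run`, where `cache` is the
`packer.Cache` parameter and `os` is imported; the ISO name is illustrative and
the exact method signatures should be checked against the `packer.Cache`
interface in your Packer version:

```go
// Lock the cache key so that parallel builders don't download the same ISO
// twice; the returned path is where the cached file lives (or should be
// written if it doesn't exist yet).
cachePath := cache.Lock("ubuntu-14.04-server-amd64.iso")
defer cache.Unlock("ubuntu-14.04-server-amd64.iso")

if _, err := os.Stat(cachePath); os.IsNotExist(err) {
    // Download the ISO to cachePath here. Other builders waiting on the same
    // key will see the finished file once the lock is released.
}
```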
---
layout: "docs"
page_title: "Custom Command Development"
description: |-
  Packer Commands are the components of Packer that add functionality to the `packer` application. Packer comes with a set of commands out of the box, such as `build`. Commands are invoked as `packer <COMMAND>`. Custom commands allow you to add new commands to Packer to perhaps perform new functionality.
---
# Custom Command Development

Packer Commands are the components of Packer that add functionality to the
`packer` application. Packer comes with a set of commands out of the box, such
as `build`. Commands are invoked as `packer <COMMAND>`. Custom commands allow
you to add new commands to Packer to perhaps perform new functionality.

Prior to reading this page, it is assumed you have read the page on [plugin
development basics](/docs/extend/developing-plugins.html).

Command plugins implement the `packer.Command` interface and are served using
the `plugin.ServeCommand` function. Commands actually have no control over what
keyword invokes the command with the `packer` binary. The keyword to invoke the
command depends on how the plugin is installed and configured in the core Packer
configuration.

~> **Warning!** This is an advanced topic. If you're new to Packer, we
recommend getting a bit more comfortable before you dive into writing plugins.
## The Interface

The interface that must be implemented for a command is the `packer.Command`
interface. It is reproduced below for easy reference. The actual interface in
the source code also contains some basic documentation explaining what each
method should do.

```go
type Command interface {
  Help() string
  Run(env Environment, args []string) int
  Synopsis() string
}
```
### The "Help" Method

The `Help` method returns long-form help. This help is most commonly shown when
a command is invoked with the `--help` or `-h` option. The help should document
all the available command line flags, purpose of the command, etc.

Packer commands generally follow the following format for help, but it is not
required. You're allowed to make the help look like anything you please.

```text
Usage: packer COMMAND [options] ARGS...

  A brief one or two sentence description of the function of the command.
```

...@@ -64,23 +64,23 @@ Options:
### The "Run" Method

`Run` is what is called when the command is actually invoked. It is given the
`packer.Environment`, which has access to almost all components of the current
Packer run, such as UI, builders, other plugins, etc. In addition to the
environment, the remaining command line args are given. These command line args
have already been stripped of the command name, so they can be passed directly
into something like the standard Go `flag` package for command-line flag
parsing.

The return value of `Run` is the exit status for the command. If everything ran
successfully, this should be 0. If any errors occurred, it should be any
positive integer.
### The "Synopsis" Method

The `Synopsis` method should return a short single-line description of what the
command does. This is used when `packer` is invoked on its own in order to show
a brief summary of the commands that Packer supports.

The synopsis should be no longer than around 50 characters, since it is already
appearing on a line with other text.
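Putting the three methods together, here is a hedged sketch of a trivial
command plugin. The "hello" behavior is purely illustrative, the `env.Ui()`
call assumes the Environment exposes the UI as described above, and the
imports are assumed to be the interface and plugin serving packages listed in
the plugin development guide (available as `packer` and `plugin`):

```go
// Imports omitted: use the interface and plugin packages listed in the
// plugin development guide for your Packer version.

// HelloCommand is an illustrative command; the behavior is made up.
type HelloCommand struct{}

func (c *HelloCommand) Help() string {
    return "Usage: packer hello [options]\n\n  Prints a friendly greeting."
}

func (c *HelloCommand) Run(env packer.Environment, args []string) int {
    // The environment gives access to the UI, builders, and other components.
    env.Ui().Say("Hello from a custom command plugin!")
    return 0
}

func (c *HelloCommand) Synopsis() string {
    return "Prints a friendly greeting"
}

func main() {
    plugin.ServeCommand(new(HelloCommand))
}
```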
---
layout: "docs"
page_title: "Developing Plugins"
description: |-
  This page will document how you can develop your own Packer plugins. Prior to reading this, it is assumed that you're comfortable with Packer and also know the basics of how Plugins work, from a user standpoint.
---
# Developing Plugins

This page will document how you can develop your own Packer plugins. Prior to
reading this, it is assumed that you're comfortable with Packer and also know
the [basics of how Plugins work](/docs/extend/plugins.html), from a user
standpoint.

Packer plugins must be written in [Go](http://golang.org/), so it is also
assumed that you're familiar with the language. This page will not be a Go
language tutorial. Thankfully, if you are familiar with Go, the Go toolchain
makes it extremely easy to develop Packer plugins.

~> **Warning!** This is an advanced topic. If you're new to Packer, we
recommend getting a bit more comfortable before you dive into writing plugins.
## Plugin System Architecture

Packer has a fairly unique plugin architecture. Instead of loading plugins
directly into a running application, Packer runs each plugin as a *separate
application*. Inter-process communication and RPC is then used to communicate
between the many running Packer processes. Packer core itself is responsible
for orchestrating the processes and handles cleanup.

The beauty of this is that your plugin can have any dependencies it wants.
Dependencies don't need to line up with what Packer core or any other plugin
uses, because they're completely isolated into the process space of the plugin
itself.

And, thanks to Go's
[interfaces](http://golang.org/doc/effective_go.html#interfaces_and_types), it
doesn't even look like inter-process communication is occurring. You just use
the interfaces like normal, but in fact they're being executed in a remote
process. Pretty cool.
## Plugin Development Basics

Developing a plugin is quite simple. All the various kinds of plugins have a
corresponding interface. The plugin simply needs to implement this interface and
expose it using the Packer plugin package (covered here shortly), and that's it!

There are two packages that really matter that every plugin must use. Other than
the following two packages, you're encouraged to use whatever packages you want.
Because plugins are their own processes, there is no danger of colliding
dependencies.

- `github.com/mitchellh/packer` - Contains all the interfaces that you have to
  implement for any given plugin.
- `github.com/mitchellh/packer/plugin` - Contains the code to serve the plugin.
  This handles all the inter-process communication stuff.

There are two steps involved in creating a plugin:

1. Implement the desired interface. For example, if you're building a builder
   plugin, implement the `packer.Builder` interface.
2. Serve the interface by calling the appropriate plugin serving method in your
   main method. In the case of a builder, this is `plugin.ServeBuilder`.
A basic example is shown below. In this example, assume the `Builder` struct
implements the `packer.Builder` interface:

```go
import (
  "github.com/mitchellh/packer/plugin"
)

...@@ -76,40 +78,38 @@ import (

type Builder struct{}

func main() {
  plugin.ServeBuilder(new(Builder))
}
```
**That's it!** `plugin.ServeBuilder` handles all the nitty gritty of
communicating with Packer core and serving your builder over RPC. It can't get
much easier than that.

Next, just build your plugin like a normal Go application, using `go build` or
however you please. The resulting binary is the plugin that can be installed
using standard installation procedures.

The specifics of how to implement each type of interface are covered in the
relevant subsections available in the navigation to the left.

~> **Lock your dependencies!** Unfortunately, Go's dependency management
story is fairly sad. There are various unofficial methods out there for locking
dependencies, and using one of them is highly recommended since the Packer
codebase will continue to improve, potentially breaking APIs along the way until
there is a stable release. By locking your dependencies, your plugins will
continue to work with the version of Packer you lock to.
## Logging and Debugging

Plugins can use the standard Go `log` package to log. Anything logged using this
will be available in the Packer log files automatically. The Packer log is
visible on stderr when the `PACKER_LOG` environment variable is set.
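For example, somewhere inside your plugin you might write (the message and the
`instanceType` variable are illustrative):

```go
// This line ends up in the Packer log, prefixed with the plugin's path,
// whenever PACKER_LOG is set.
log.Printf("launching instance of type: %s", instanceType)
```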
Packer will prefix any logs from plugins with the path to that plugin to make it
identifiable where the logs come from. Some example logs are shown below:

```text
2013/06/10 21:44:43 ui: Available commands are:
2013/06/10 21:44:43 Loading command: build
2013/06/10 21:44:43 packer-command-build: 2013/06/10 21:44:43 Plugin minimum port: 10000
...@@ -117,31 +117,31 @@ shown below:
2013/06/10 21:44:43 packer-command-build: 2013/06/10 21:44:43 Plugin address: :10000
```

As you can see, the log messages from the "build" command plugin are prefixed
with "packer-command-build". Log output is *extremely* helpful in debugging
issues and you're encouraged to be as verbose as you need to be in order for the
logs to be helpful.
## Plugin Development Tips

Here are some tips for developing plugins, often answering common questions or
concerns.

### Naming Conventions

It is standard practice to name the resulting plugin application in the format
of `packer-TYPE-NAME`. For example, if you're building a new builder for
CustomCloud, it would be standard practice to name the resulting plugin
`packer-builder-custom-cloud`. This naming convention helps users identify the
purpose of a plugin.

### Testing Plugins

While developing plugins, you can configure your Packer configuration to point
directly to the compiled plugin in order to test it. For example, building the
CustomCloud plugin, I may configure packer like so:
```javascript
{
  "builders": {
    "custom-cloud": "/an/absolute/path/to/packer-builder-custom-cloud"
...@@ -149,13 +149,13 @@ building the CustomCloud plugin, I may configure packer like so:
  }
}
```

This would configure Packer to have the "custom-cloud" plugin, and execute the
binary that I am building during development. This is extremely useful during
development.
### Distributing Plugins

It is recommended you use a tool like [goxc](https://github.com/laher/goxc) in
order to cross-compile your plugin for every platform that Packer supports,
since Go applications are platform-specific. goxc will allow you to build for
every platform from your own computer.
---
layout: "docs"
page_title: "Packer Plugins - Extend Packer"
description: |-
  Packer Plugins allow new functionality to be added to Packer without modifying the core source code. Packer plugins are able to add new commands, builders, provisioners, hooks, and more. In fact, much of Packer itself is implemented by writing plugins that are simply distributed with Packer. For example, all the commands, builders, provisioners, and more that ship with Packer are implemented as Plugins that are simply hardcoded to load with Packer.
---
# Packer Plugins

Packer Plugins allow new functionality to be added to Packer without modifying
the core source code. Packer plugins are able to add new commands, builders,
provisioners, hooks, and more. In fact, much of Packer itself is implemented by
writing plugins that are simply distributed with Packer. For example, all the
commands, builders, provisioners, and more that ship with Packer are implemented
as Plugins that are simply hardcoded to load with Packer.

This page will cover how to install and use plugins. If you're interested in
developing plugins, the documentation for that is available on the [developing
plugins](/docs/extend/developing-plugins.html) page.

Because Packer is so young, there is no official listing of available Packer
plugins. Plugins are best found via Google. Typically, searching "packer plugin
*x*" will find what you're looking for if it exists. As Packer gets older, an
official plugin directory is planned.
## How Plugins Work

Packer plugins are completely separate, standalone applications that the core of
Packer starts and communicates with.

These plugin applications aren't meant to be run manually. Instead, Packer core
executes these plugin applications in a certain way and communicates with them.

For example, the VMware builder is actually a standalone binary named
`packer-builder-vmware`. The next time you run a Packer build, look at your
process list and you should see a handful of `packer-` prefixed applications
running.
## Installing Plugins

The easiest way to install a plugin is to name it correctly, then place it in
the proper directory. To name a plugin correctly, make sure the binary is named
`packer-TYPE-NAME`. For example, `packer-builder-amazon-ebs` for a "builder"
type plugin named "amazon-ebs". The valid plugin types are listed further down
this page.

Once the plugin is named properly, Packer automatically discovers plugins in the
following directories in the given order. If a conflicting plugin is found
later, it will take precedence over one found earlier. A brief installation
example follows the list.

1. The directory where `packer` is, or the executable directory.
2. `~/.packer.d/plugins` on Unix systems or `%APPDATA%/packer.d/plugins` on
   Windows.
3. The current working directory.
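For example, on a Unix system a hypothetical builder plugin could be installed
like this (the plugin name is illustrative):

```text
$ mkdir -p ~/.packer.d/plugins
$ cp packer-builder-custom-cloud ~/.packer.d/plugins/
```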
The valid types for plugins are:

- `builder` - Plugins responsible for building images for a specific platform.
- `command` - A CLI sub-command for `packer`.
- `post-processor` - A post-processor responsible for taking an artifact from a
  builder and turning it into something else.
- `provisioner` - A provisioner to install software on images created by a
  builder.
---
layout: "docs"
page_title: "Custom Post-Processor Development"
description: |-
  Packer Post-processors are the components of Packer that transform one artifact into another, for example by compressing files, or uploading them.
---
# Custom Post-Processor Development

Packer Post-processors are the components of Packer that transform one artifact
into another, for example by compressing files, or uploading them.

In the compression example, the transformation would be taking an artifact with
a set of files, compressing those files, and returning a new artifact with only
a single file (the compressed archive). For the upload example, the
transformation would be taking an artifact with some set of files, uploading
those files, and returning an artifact with a single ID: the URL of the upload.

Prior to reading this page, it is assumed you have read the page on [plugin
development basics](/docs/extend/developing-plugins.html).

Post-processor plugins implement the `packer.PostProcessor` interface and are
served using the `plugin.ServePostProcessor` function.

~> **Warning!** This is an advanced topic. If you're new to Packer, we
recommend getting a bit more comfortable before you dive into writing plugins.
## The Interface

The interface that must be implemented for a post-processor is the
`packer.PostProcessor` interface. It is reproduced below for easy reference. The
actual interface in the source code also contains some basic documentation
explaining what each method should do.

```go
type PostProcessor interface {
  Configure(interface{}) error
  PostProcess(Ui, Artifact) (a Artifact, keep bool, err error)
}
```
### The "Configure" Method

The `Configure` method for each post-processor is called early in the build
process to configure the post-processor. The configuration is passed in as a raw
`interface{}`. The configure method is responsible for translating this
configuration into an internal structure, validating it, and returning any
errors.

For decoding the `interface{}` into a meaningful structure, the
[mapstructure](https://github.com/mitchellh/mapstructure) library is
recommended. Mapstructure will take an `interface{}` and decode it into an
arbitrarily complex struct. If there are any errors, it generates very
human-friendly errors that can be returned directly from the configure method.

While it is not actively enforced, **no side effects** should occur from running
the `Configure` method. Specifically, don't create files, don't create network
connections, etc. Configure's purpose is solely to set up internal state and
validate the configuration as much as possible.

`Configure` being run is not an indication that `PostProcess` will ever run. For
example, `packer validate` will run `Configure` to verify the configuration
validates, but will never actually run the build.
### The "PostProcess" Method

The `PostProcess` method is where the real work goes. PostProcess is responsible
for taking one `packer.Artifact` implementation, and transforming it into
another.

When we say "transform," we don't mean actually modifying the existing
`packer.Artifact` value itself. We mean taking the contents of the artifact and
creating a new artifact from that. For example, if we were creating a "compress"
post-processor that is responsible for compressing files, the transformation
would be taking the `Files()` from the original artifact, compressing them, and
creating a new artifact with a single file: the compressed archive.

The result signature of this method is `(Artifact, bool, error)`. Each return
value is explained below, and a brief sketch of the method follows the list.
- `Artifact` - The newly created artifact if no errors occurred.
- `bool` - If true, the input artifact will forcefully be kept. By default,
  Packer typically deletes all input artifacts, since the user doesn't generally
  want intermediary artifacts. However, some post-processors depend on the
  previous artifact existing. If this is `true`, it forces packer to keep the
  artifact around.
- `error` - Non-nil if there was an error in any way. If this is the case, the
  other two return values are ignored.
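Here is a hedged sketch of a `PostProcess` implementation for the hypothetical
"compress" example; the `CompressedArtifact` type and the `compressFiles`
helper are illustrative, not real Packer APIs, and imports are assumed as in
the plugin development guide:

```go
// PostProcess compresses the files of the input artifact into a single archive
// and returns a new artifact pointing at that archive.
func (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {
    ui.Say("Compressing artifact files...")

    // compressFiles is an illustrative helper that writes an archive and
    // returns its path.
    archivePath, err := compressFiles(artifact.Files())
    if err != nil {
        return nil, false, err
    }

    // Returning keep=false lets Packer delete the input artifact; return true
    // instead if a later post-processor still needs it.
    return &CompressedArtifact{path: archivePath}, false, nil
}
```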
---
layout: "docs"
page_title: "Custom Provisioner Development"
description: |-
  Packer Provisioners are the components of Packer that install and configure software into a running machine prior to turning that machine into an image. An example of a provisioner is the shell provisioner, which runs shell scripts within the machines.
---
# Custom Provisioner Development

Packer Provisioners are the components of Packer that install and configure
software into a running machine prior to turning that machine into an image. An
example of a provisioner is the [shell provisioner](/docs/provisioners/shell.html),
which runs shell scripts within the machines.

Prior to reading this page, it is assumed you have read the page on [plugin
development basics](/docs/extend/developing-plugins.html).

Provisioner plugins implement the `packer.Provisioner` interface and are served
using the `plugin.ServeProvisioner` function.

~> **Warning!** This is an advanced topic. If you're new to Packer, we
recommend getting a bit more comfortable before you dive into writing plugins.
## The Interface

The interface that must be implemented for a provisioner is the
`packer.Provisioner` interface. It is reproduced below for easy reference. The
actual interface in the source code also contains some basic documentation
explaining what each method should do.

```go
type Provisioner interface {
  Prepare(...interface{}) error
  Provision(Ui, Communicator) error
}
```
### The "Prepare" Method

The `Prepare` method for each provisioner is called prior to any runs with the
configuration that was given in the template. This is passed in as an array of
`interface{}` types, but is generally `map[string]interface{}`. The prepare
method is responsible for translating this configuration into an internal
structure, validating it, and returning any errors.

For multiple parameters, they should be merged together into the final
configuration, with later parameters overwriting any previous configuration. The
exact semantics of the merge are left to the provisioner author.

For decoding the `interface{}` into a meaningful structure, the
[mapstructure](https://github.com/mitchellh/mapstructure) library is
recommended. Mapstructure will take an `interface{}` and decode it into an
arbitrarily complex struct. If there are any errors, it generates very
human-friendly errors that can be returned directly from the prepare method.

While it is not actively enforced, **no side effects** should occur from running
the `Prepare` method. Specifically, don't create files, don't launch virtual
machines, etc. Prepare's purpose is solely to configure the provisioner and
validate the configuration.

The `Prepare` method is called very early in the build process so that errors
may be displayed to the user before anything actually happens.
### The "Provision" Method

The `Provision` method is called when a machine is running and ready to be
provisioned. The provisioner should do its real work here.

The method takes two parameters: a `packer.Ui` and a `packer.Communicator`. The
UI can be used to communicate with the user what is going on. The communicator
is used to communicate with the running machine, and is guaranteed to be
connected at this point.

The provision method should not return until provisioning is complete.
## Using the Communicator

The `packer.Communicator` parameter and interface is used to communicate with
the running machine. The machine may be local (in a virtual machine or container
of some sort) or it may be remote (in a cloud). The communicator interface
abstracts this away so that communication is the same overall.

The documentation around the [code
itself](https://github.com/mitchellh/packer/blob/master/packer/communicator.go)
is really great as an overview of how to use the interface. You should begin by
reading this. Once you have read it, you can see some example usage below:

```go
// Build the remote command.
var cmd packer.RemoteCmd
cmd.Command = "echo foo"
```

...
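Building on that fragment, here is a hedged sketch of running a remote command
from inside `Provision` and checking its exit status. The `Start`, `Wait`, and
`ExitStatus` usage follows the communicator documentation linked above, but
verify the exact API against your Packer version; imports (including `fmt`) are
assumed as in the plugin development guide:

```go
func (p *Provisioner) Provision(ui packer.Ui, comm packer.Communicator) error {
    // Build the remote command.
    var cmd packer.RemoteCmd
    cmd.Command = "echo foo"

    ui.Say("Running: " + cmd.Command)
    if err := comm.Start(&cmd); err != nil {
        return err
    }

    // Wait for the remote command to finish before inspecting its exit status.
    cmd.Wait()
    if cmd.ExitStatus != 0 {
        return fmt.Errorf("command exited with status %d", cmd.ExitStatus)
    }

    return nil
}
```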
---
layout: "docs"
page_title: "Packer Documentation"
description: |-
  Welcome to the Packer documentation! This documentation is more of a reference guide for all available features and options in Packer. If you're just getting started with Packer, please start with the introduction and getting started guide instead.
---
# Packer Documentation

Welcome to the Packer documentation! This documentation is more of a reference
guide for all available features and options in Packer. If you're just getting
started with Packer, please start with the [introduction and getting started
guide](/intro) instead.
---
layout: "docs"
page_title: "Install Packer"
description: |-
  Packer must first be installed on the machine you want to run it on. To make installation easy, Packer is distributed as a binary package for all supported platforms and architectures. This page will not cover how to compile Packer from source, as that is covered in the README and is only recommended for advanced users.
---
# Install Packer

Packer must first be installed on the machine you want to run it on. To make
installation easy, Packer is distributed as a [binary package](/downloads.html)
for all supported platforms and architectures. This page will not cover how to
compile Packer from source, as that is covered in the
[README](https://github.com/mitchellh/packer/blob/master/README.md) and is only
recommended for advanced users.

## Installing Packer

To install packer, first find the [appropriate package](/downloads.html) for
your system and download it. Packer is packaged as a "zip" file.

Next, unzip the downloaded package into a directory where Packer will be
installed. On Unix systems, `~/packer` or `/usr/local/packer` is generally good,
depending on whether you want to restrict the install to just your user or
install it system-wide. On Windows systems, you can put it wherever you'd like.
After unzipping the package, the directory should contain a set of binary After unzipping the package, the directory should contain a set of binary
programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step to
to installation is to make sure the directory you installed Packer to installation is to make sure the directory you installed Packer to is on the
is on the PATH. See [this page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) PATH. See [this
for instructions on setting the PATH on Linux and Mac. page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux)
[This page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) for instructions on setting the PATH on Linux and Mac. [This
page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows)
contains instructions for setting the PATH on Windows. contains instructions for setting the PATH on Windows.
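
As a sketch, on a Unix shell you might append a line like the following to your
shell profile. The `/usr/local/packer` path is just the example install
location used above; substitute whatever directory you unzipped Packer into.

``` {.text}
$ echo 'export PATH=$PATH:/usr/local/packer' >> ~/.bashrc
$ source ~/.bashrc
```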
## Verifying the Installation

After installing Packer, verify the installation worked by opening a new command
prompt or console and checking that `packer` is available:

``` {.text}
$ packer
usage: packer [--version] [--help] <command> [<args>]

...@@ -50,8 +54,8 @@ Available commands are:
```

If you get an error that `packer` could not be found, then your PATH
environmental variable was not set up properly. Please go back and ensure that
your PATH variable contains the directory where Packer is installed.

Otherwise, Packer is installed and you're ready to go!

...@@ -59,24 +63,24 @@ Otherwise, Packer is installed and you're ready to go!

Installation from binary packages is currently the only officially supported
installation method. The binary packages are guaranteed to be the latest
available version and match the proper checksums. However, in addition to the
official binaries, there are other unofficial 3rd-party methods of installation
managed by the Packer community:

### Homebrew

If you're using OS X and [Homebrew](http://brew.sh), you can install Packer:

``` {.text}
$ brew install packer
```

### Chocolatey

If you're using Windows and [Chocolatey](http://chocolatey.org), you can install
Packer from the Windows command line (cmd). Remember that this is updated by a
3rd party, so it may not be the latest available version.

``` {.text}
$ choco install packer
```
---
description: |
    These are the machine-readable types that exist as part of the output of
    `packer build`.
layout: docs_machine_readable
page_title: 'Command: build - Machine-Readable Reference'
...

# Build Command Types

These are the machine-readable types that exist as part of the output of
`packer build`.

<dl>
  <dt>artifact (>= 2)</dt>
  <dd>
    <p>
      Information about an artifact of the targeted item. This is a
      fairly complex (but uniform!) machine-readable type that contains
      subtypes. The subtypes are documented within this page in the
      syntax of "artifact subtype: SUBTYPE". The number of arguments within
      that subtype is in addition to the artifact args.
    </p>
    <p>
      <strong>Data 1: index</strong> - The zero-based index of the
      artifact being described. This goes up to "artifact-count" (see below).
    </p>
    <p>
      <strong>Data 2: subtype</strong> - The subtype that describes
      the remaining arguments. See the documentation for the
      subtype docs throughout this page.
    </p>
    <p>
      <strong>Data 3..n: subtype data</strong> - Zero or more additional
      data points related to the subtype. The exact count and meaning
      of these subtypes comes from the subtype documentation.
    </p>
  </dd>

  <dt>artifact-count (1)</dt>
  <dd>
    <p>
      The number of artifacts associated with the given target. This
      will always be outputted <em>before</em> any other artifact information,
      so you're able to know how many upcoming artifacts to look for.
    </p>
    <p>
      <strong>Data 1: count</strong> - The number of artifacts as
      a base 10 integer.
    </p>
  </dd>

  <dt>artifact subtype: builder-id (1)</dt>
  <dd>
    <p>
      The unique ID of the builder that created this artifact.
    </p>
    <p>
      <strong>Data 1: id</strong> - The unique ID of the builder.
    </p>
  </dd>

  <dt>artifact subtype: end (0)</dt>
  <dd>
    <p>
      The last machine-readable output line outputted for an artifact.
      This is a sentinel value so you know that no more data related to
      the targeted artifact will be outputted.
    </p>
  </dd>

  <dt>artifact subtype: file (2)</dt>
  <dd>
    <p>
      A single file associated with the artifact. There are 0 to
      "files-count" of these entries to describe every file that is
      part of the artifact.
    </p>
    <p>
      <strong>Data 1: index</strong> - Zero-based index of the file.
      This goes from 0 to "files-count" minus one.
    </p>
    <p>
      <strong>Data 2: filename</strong> - The filename.
    </p>
  </dd>

  <dt>artifact subtype: files-count (1)</dt>
  <dd>
    <p>
      The number of files associated with this artifact. Not all
      artifacts have files associated with them.
    </p>
    <p>
      <strong>Data 1: count</strong> - The number of files.
    </p>
  </dd>

  <dt>artifact subtype: id (1)</dt>
  <dd>
    <p>
      The ID (if any) of the artifact that was built. Not all artifacts
      have associated IDs. For example, AMIs built have IDs associated
      with them, but VirtualBox images do not. The exact format of the ID
      is specific to the builder.
    </p>
    <p>
      <strong>Data 1: id</strong> - The ID of the artifact.
    </p>
  </dd>

  <dt>artifact subtype: nil (0)</dt>
  <dd>
    <p>
      If present, this means that the artifact was nil, or that the targeted
      build completed successfully but no artifact was created.
    </p>
  </dd>

  <dt>artifact subtype: string (1)</dt>
  <dd>
    <p>
      The human-readable string description of the artifact provided by
      the artifact itself.
    </p>
    <p>
      <strong>Data 1: string</strong> - The string output for the artifact.
    </p>
  </dd>

  <dt>error-count (1)</dt>
  <dd>
    <p>
      The number of errors that occurred during the build. This will
      always be outputted before any errors so you know how many are coming.
    </p>
    <p>
      <strong>Data 1: count</strong> - The number of build errors as
      a base 10 integer.
    </p>
  </dd>

  <dt>error (1)</dt>
  <dd>
    <p>
      A build error that occurred. The target of this output will be
      the build that had the error.
    </p>
    <p>
      <strong>Data 1: error</strong> - The error message as a string.
    </p>
  </dd>
</dl>
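
As an illustration, the artifact types above appear in the comma-separated line
format described on the machine-readable output page. The lines below are only
a sketch: the timestamp, build name, builder ID, and AMI ID are placeholders,
not output from a real run.

``` {.text}
1440099494,amazon-ebs,artifact-count,1
1440099494,amazon-ebs,artifact,0,builder-id,mitchellh.amazonebs
1440099494,amazon-ebs,artifact,0,id,us-east-1:ami-0000000
1440099494,amazon-ebs,artifact,0,string,AMIs were created.
1440099494,amazon-ebs,artifact,0,end
```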
---
description: |
    These are the machine-readable types that exist as part of the output of
    `packer inspect`.
layout: docs_machine_readable
page_title: 'Command: inspect - Machine-Readable Reference'
...

# Inspect Command Types

These are the machine-readable types that exist as part of the output of
`packer inspect`.

<dl>
  <dt>template-variable (3)</dt>
  <dd>
    <p>
      A <a href="/docs/templates/user-variables.html">user variable</a>
      defined within the template.
    </p>
    <p>
      <strong>Data 1: name</strong> - Name of the variable.
    </p>
    <p>
      <strong>Data 2: default</strong> - The default value of the variable.
    </p>
    <p>
      <strong>Data 3: required</strong> - If non-zero, then this variable
      is required.
    </p>
  </dd>

  <dt>template-builder (2)</dt>
  <dd>
    <p>
      A builder defined within the template.
    </p>
    <p>
      <strong>Data 1: name</strong> - The name of the builder.
    </p>
    <p>
      <strong>Data 2: type</strong> - The type of the builder. This will
      generally be the same as the name unless you explicitly override
      the name.
    </p>
  </dd>

  <dt>template-provisioner (1)</dt>
  <dd>
    <p>
      A provisioner defined within the template. Multiple of these may
      exist. If so, they are outputted in the order they would run.
    </p>
    <p>
      <strong>Data 1: name</strong> - The name/type of the provisioner.
    </p>
  </dd>
</dl>
---
description: |
    These are the machine-readable types that exist as part of the output of
    `packer version`.
layout: docs_machine_readable
page_title: 'Command: version - Machine-Readable Reference'
...

# Version Command Types

These are the machine-readable types that exist as part of the output of
`packer version`.

<dl>
  <dt>version (1)</dt>
  <dd>
    <p>The version number of Packer running.</p>
    <p>
      <strong>Data 1: version</strong> - The version of Packer running,
      only including the major, minor, and patch versions. Example: "0.2.4".
    </p>
  </dd>

  <dt>version-commit (1)</dt>
  <dd>
    <p>The SHA1 of the Git commit that built this version of Packer.</p>
    <p>
      <strong>Data 1: commit SHA1</strong> - The SHA1 of the commit.
    </p>
  </dd>

  <dt>version-prerelease (1)</dt>
  <dd>
    <p>
      The prerelease tag (if any) for the running version of Packer. This
      can be "beta", "dev", "alpha", etc. If this is empty, you can assume
      it is a release version running.
    </p>
    <p>
      <strong>Data 1: prerelease name</strong> - The name of the
      prerelease tag.
    </p>
  </dd>
</dl>
---
description: |
    These are the machine-readable types that can appear in almost any
    machine-readable output and are provided by Packer core itself.
layout: docs_machine_readable
page_title: 'General Types - Machine-Readable Reference'
...

# General Types

...@@ -11,21 +12,24 @@ These are the machine-readable types that can appear in almost any

machine-readable output and are provided by Packer core itself.

<dl>
  <dt>ui (2)</dt>
  <dd>
    <p>
      Specifies the output and type of output that would've normally
      gone to the console if Packer were running in human-readable mode.
    </p>
    <p>
      <strong>Data 1: type</strong> - The type of UI message that would've
      been outputted. Can be "say", "message", or "error".
    </p>
    <p>
      <strong>Data 2: output</strong> - The UI message that would have
      been outputted.
    </p>
  </dd>
</dl>
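
For illustration, a `ui` line follows the same comma-separated format used by
the other machine-readable types. The timestamp and message below are
placeholders only, not captured output.

``` {.text}
1440099494,,ui,say,==> amazon-ebs: Waiting for the instance to become ready...
```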
---
description: |
    This is the reference for the various message categories for Packer
    machine-readable output. Please read that page if you're unfamiliar with the
    general format and usage for the machine-readable output.
layout: docs_machine_readable
page_title: 'Machine-Readable Reference'
...

# Machine-Readable Reference

This is the reference for the various message categories for Packer
[machine-readable output](/docs/command-line/machine-readable.html). Please read
that page if you're unfamiliar with the general format and usage for the
machine-readable output.

The layout of this reference is split by where the types come from. There is a
set of core types that come from Packer core itself. Then there are types that
come from various components of Packer such as the builders, provisioners, and
more.

Within each section, the format of the documentation is the following:

<br>

<dl>
  <dt>type-name (data-count)</dt>
  <dd>
    <p>Description of the type.</p>
    <p>
      <strong>Data 1: name</strong> - Description.
    </p>
  </dd>
</dl>
---
description: |
    There are a few configuration settings that affect Packer globally by
    configuring the core of Packer. These settings all have reasonable defaults,
    so you generally don't have to worry about them until you want to tweak a
    configuration. If you're just getting started with Packer, don't worry about
    core configuration for now.
layout: docs
page_title: Core Configuration
...

# Core Configuration

There are a few configuration settings that affect Packer globally by
configuring the core of Packer. These settings all have reasonable defaults, so
you generally don't have to worry about them until you want to tweak a
configuration. If you're just getting started with Packer, don't worry about
core configuration for now.

The default location where Packer looks for this file depends on the platform.
For all non-Windows platforms, Packer looks for `$HOME/.packerconfig`. For
Windows, Packer looks for `%APPDATA%/packer.config`. If the file doesn't exist,
then Packer ignores it and just uses the default configuration.

The location of the core configuration file can be modified by setting the
`PACKER_CONFIG` environmental variable to be the path to another file.

The format of the configuration file is basic JSON.

...@@ -28,12 +32,13 @@ The format of the configuration file is basic JSON.

Below is the list of all available configuration parameters for the core
configuration file. None of these are required, since all have sane defaults.

-   `plugin_min_port` and `plugin_max_port` (integer) - These are the minimum
    and maximum ports that Packer uses for communication with plugins, since
    plugin communication happens over TCP connections on your local host. By
    default these are 10,000 and 25,000, respectively. Be sure to set a fairly
    wide range here, since Packer can easily use over 25 ports on a single run.

-   `builders`, `commands`, `post-processors`, and `provisioners` are objects
    that are used to install plugins. The details of how exactly these are set
    are covered in more detail in the [installing plugins documentation
    page](/docs/extend/plugins.html). A small example file is sketched below.
---
description: |
    Packer strives to be stable and bug-free, but issues inevitably arise where
    certain things may not work entirely correctly, or may not appear to work
    correctly. In these cases, it is sometimes helpful to see more details about
    what Packer is actually doing.
layout: docs
page_title: Debugging Packer
...

# Debugging Packer Builds

...@@ -17,39 +20,40 @@ usually will stop between each step, waiting for keyboard input before

continuing. This will allow you to inspect state and so on.

In debug mode, once the remote instance is instantiated, Packer will emit an
ephemeral private SSH key as a .pem file into the current directory. Using that
key you can `ssh -i <key.pem>` into the remote build instance and see what is
going on for debugging. The ephemeral key will be deleted at the end of the
Packer run during cleanup.

### Windows

As of Packer 0.8.1 the default WinRM communicator will emit the password for a
Remote Desktop Connection into your instance. This happens following the several
minute pause as the instance is booted. Note that a .pem key is still created for
securely transmitting the password. Packer automatically decrypts the password
for you in debug mode.

## Debugging Packer

Issues occasionally arise where certain things may not work entirely correctly,
or may not appear to work correctly. In these cases, it is sometimes helpful to
see more details about what Packer is actually doing.

Packer has detailed logs which can be enabled by setting the `PACKER_LOG`
environmental variable to any value, like this:
`PACKER_LOG=1 packer build <config.json>`. This will cause detailed logs to
appear on stderr. The logs contain log messages from Packer as well as any
plugins that are being used. Log messages from plugins are prefixed by their
application name.

Note that because Packer is highly parallelized, log messages sometimes appear
out of order, especially with respect to plugins. In this case, it is important
to pay attention to the timestamp of the log messages to determine order.

In addition to simply enabling the log, you can set `PACKER_LOG_PATH` in order
to force the log to always go to a specific file when logging is enabled. Note
that even when `PACKER_LOG_PATH` is set, `PACKER_LOG` must be set in order for
any logging to be enabled.
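
For example, to capture a verbose log to a file while building a template (the
file names here are placeholders):

``` {.text}
$ PACKER_LOG=1 PACKER_LOG_PATH=packer.log packer build template.json
```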
If you find a bug with Packer, please include the detailed log by using a
service such as [gist](http://gist.github.com).
---
description: 'Packer uses a variety of environmental variables.'
layout: docs
page_title: Environmental Variables for Packer
...

# Environmental Variables for Packer

Packer uses a variety of environmental variables. A listing and description of
each can be found below:

-   `PACKER_CACHE_DIR` - The location of the Packer cache.

-   `PACKER_CONFIG` - The location of the core configuration file. The format of
    the configuration file is basic JSON. See the [core configuration
    page](/docs/other/core-configuration.html).

-   `PACKER_LOG` - Setting this to any value will enable the logger. See the
    [debugging page](/docs/other/debugging.html).

-   `PACKER_LOG_PATH` - The location of the log file. Note: `PACKER_LOG` must be
    set for any logging to occur. See the [debugging
    page](/docs/other/debugging.html).

-   `PACKER_NO_COLOR` - Setting this to any value will disable color in
    the terminal.

-   `PACKER_PLUGIN_MAX_PORT` - The maximum port that Packer uses for
    communication with plugins, since plugin communication happens over TCP
    connections on your local host. The default is 25,000. See the [core
    configuration page](/docs/other/core-configuration.html).

-   `PACKER_PLUGIN_MIN_PORT` - The minimum port that Packer uses for
    communication with plugins, since plugin communication happens over TCP
    connections on your local host. The default is 10,000. See the [core
    configuration page](/docs/other/core-configuration.html).
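
As a sketch of how several of the variables above combine on the command line
(the cache directory and template name are placeholders):

``` {.text}
$ PACKER_CACHE_DIR=/tmp/packer_cache PACKER_NO_COLOR=1 packer build template.json
```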
---
description: |
    The Atlas post-processor for Packer receives an artifact from a Packer build
    and uploads it to Atlas. Atlas hosts and serves artifacts, allowing you to
    version and distribute them in a simple way.
layout: docs
page_title: 'Atlas Post-Processor'
...

# Atlas Post-Processor

Type: `atlas`

The Atlas post-processor for Packer receives an artifact from a Packer build and
uploads it to Atlas. [Atlas](https://atlas.hashicorp.com) hosts and serves
artifacts, allowing you to version and distribute them in a simple way.

## Workflow

To take full advantage of Packer and Atlas, it's important to understand the
workflow for creating artifacts with Packer and storing them in Atlas using this
post-processor. The goal of the Atlas post-processor is to streamline the
distribution of public or private artifacts by hosting them in a central
location in Atlas.

Here is an example workflow:

1.  Packer builds an AMI with the [Amazon AMI
    builder](/docs/builders/amazon.html).
2.  The `atlas` post-processor takes the resulting AMI and uploads it to Atlas.
    The `atlas` post-processor is configured with the name of the AMI, for
    example `hashicorp/foobar`, to create the artifact in Atlas or update the
    version if the artifact already exists.
3.  The new version is ready and available to be used in deployments with a tool
    like [Terraform](https://terraform.io).

## Configuration

...@@ -29,32 +40,37 @@ The configuration allows you to specify and access the artifact in Atlas.

### Required:

-   `token` (string) - Your access token for the Atlas API. This can be
    generated on your [tokens
    page](https://atlas.hashicorp.com/settings/tokens). Alternatively, you can
    export your Atlas token as an environmental variable and remove it from
    the configuration.

-   `artifact` (string) - The shorthand tag for your artifact that maps to
    Atlas, i.e. `hashicorp/foobar` for `atlas.hashicorp.com/hashicorp/foobar`.
    You must have access to the organization (hashicorp in this example) in
    order to add an artifact to the organization in Atlas.

-   `artifact_type` (string) - For uploading AMIs to Atlas, `artifact_type` will
    always be `amazon.ami`. This field must be defined because Atlas can host
    other artifact types, such as Vagrant boxes.

-> **Note:** If you want to upload Vagrant boxes to Atlas, use the [Atlas
post-processor](/docs/post-processors/atlas.html).

### Optional:

-   `atlas_url` (string) - Override the base URL for Atlas. This is useful if
    you're using Atlas Enterprise in your own network. Defaults to
    `https://atlas.hashicorp.com/api/v1`.

-   `metadata` (map) - Send metadata about the artifact. If the artifact type is
    "vagrant.box", you must specify a "provider" metadata key indicating which
    provider to use.

### Example Configuration

``` {.javascript}
{
  "variables": {
    "aws_access_key": "ACCESS_KEY_HERE",
...
---
description: |
    The Packer compress post-processor takes an artifact with files (such as from
    VMware or VirtualBox) and compresses the artifact into a single archive.
layout: docs
page_title: 'compress Post-Processor'
...

# Compress Post-Processor

...@@ -16,49 +17,55 @@ VMware or VirtualBox) and compresses the artifact into a single archive.

### Required:

You must specify the output filename. The archive format is derived from the
filename.

-   `output` (string) - The path to save the compressed archive. The archive
    format is inferred from the filename. E.g. `.tar.gz` will be a gzipped
    tarball and `.zip` will be a zip file. If the extension can't be detected,
    Packer defaults to `.tar.gz` behavior but will not change the filename.

If you are executing multiple builders in parallel you should make sure `output`
is unique for each one. For example `packer_{{.BuildName}}_{{.Provider}}.zip`.

### Optional:

If you want more control over how the archive is created you can specify the
following settings:

-   `compression_level` (integer) - Specify the compression level, for
    algorithms that support it, from 1 through 9 inclusive. Typically higher
    compression levels take longer but produce smaller files. Defaults to `6`.

-   `keep_input_artifact` (boolean) - Keep source files; defaults to `false`.

### Supported Formats

Supported file extensions include `.zip`, `.tar`, `.gz`, `.tar.gz`, `.lz4` and
`.tar.lz4`. Note that `.gz` and `.lz4` will fail if you have multiple files to
compress.

## Examples

Some minimal examples are shown below, showing only the post-processor
configuration:

``` {.json}
{
  "type": "compress",
  "output": "archive.tar.lz4"
}
```

``` {.json}
{
  "type": "compress",
  "output": "archive.zip"
}
```

``` {.json}
{
  "type": "compress",
  "output": "archive.gz",
...
---
description: |
    The Packer Docker import post-processor takes an artifact from the docker
    builder and imports it with Docker locally. This allows you to apply a
    repository and tag to the image and lets you use the other Docker
    post-processors such as docker-push to push the image to a registry.
layout: docs
page_title: 'docker-import Post-Processor'
...

# Docker Import Post-Processor

Type: `docker-import`

The Packer Docker import post-processor takes an artifact from the [docker
builder](/docs/builders/docker.html) and imports it with Docker locally. This
allows you to apply a repository and tag to the image and lets you use the other
Docker post-processors such as
[docker-push](/docs/post-processors/docker-push.html) to push the image to a
registry.

## Configuration

The configuration for this post-processor is extremely simple. At least a
repository is required.

-   `repository` (string) - The repository of the imported image.

-   `tag` (string) - The tag for the imported image. By default this is not set.

## Example

An example is shown below, showing only the post-processor configuration:

``` {.javascript}
{
  "type": "docker-import",
  "repository": "mitchellh/packer",

...@@ -38,9 +40,9 @@ An example is shown below, showing only the post-processor configuration:

}
```

This example would take the image created by the Docker builder and import it
into the local Docker process with a name of `mitchellh/packer:0.7`.

Following this, you can use the
[docker-push](/docs/post-processors/docker-push.html) post-processor to push it
to a registry, if you want.
---
description: |
    The Packer Docker push post-processor takes an artifact from the
    docker-import post-processor and pushes it to a Docker registry.
layout: docs
page_title: 'Docker Push Post-Processor'
...

# Docker Push Post-Processor

Type: `docker-push`

The Packer Docker push post-processor takes an artifact from the
[docker-import](/docs/post-processors/docker-import.html) post-processor and
pushes it to a Docker registry.

## Configuration

This post-processor has only optional configuration:

-   `login` (boolean) - Defaults to false. If true, the post-processor will
    log in prior to pushing.

-   `login_email` (string) - The email to use to authenticate to login.

-   `login_username` (string) - The username to use to authenticate to login.

-   `login_password` (string) - The password to use to authenticate to login.

-   `login_server` (string) - The server address to login to.

-> **Note:** If you log in using the credentials above, the post-processor
will automatically log you out afterwards (just for the server specified).

## Example

For an example of using docker-push, see the section on using generated
artifacts from the [docker builder](/docs/builders/docker.html).
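
As a minimal sketch of the optional login settings described above (the
username, password, and server values are placeholders, not working
credentials):

``` {.json}
{
  "type": "docker-push",
  "login": true,
  "login_username": "YOUR_USERNAME",
  "login_password": "YOUR_PASSWORD",
  "login_server": "https://registry.example.com"
}
```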
---
description: |
    The Packer Docker Save post-processor takes an artifact from the docker
    builder that was committed and saves it to a file. This is similar to
    exporting the Docker image directly from the builder, except that it
    preserves the hierarchy of images and metadata.
layout: docs
page_title: 'docker-save Post-Processor'
...

# Docker Save Post-Processor

Type: `docker-save`

The Packer Docker Save post-processor takes an artifact from the [docker
builder](/docs/builders/docker.html) that was committed and saves it to a file.
This is similar to exporting the Docker image directly from the builder, except
that it preserves the hierarchy of images and metadata.

We understand the terminology can be a bit confusing, but we've adopted the
terminology from Docker, so if you're familiar with that, then you'll be
familiar with this and vice versa.

## Configuration

The configuration for this post-processor is extremely simple.

-   `path` (string) - The path to save the image.

## Example

An example is shown below, showing only the post-processor configuration:

``` {.javascript}
{
  "type": "docker-save",
  "path": "foo.tar"
...
---
description: |
    The Packer Docker Tag post-processor takes an artifact from the docker
    builder that was committed and tags it into a repository. This allows you to
    use the other Docker post-processors such as docker-push to push the image to
    a registry.
layout: docs
page_title: 'docker-tag Post-Processor'
...

# Docker Tag Post-Processor

Type: `docker-tag`

The Packer Docker Tag post-processor takes an artifact from the [docker
builder](/docs/builders/docker.html) that was committed and tags it into a
repository. This allows you to use the other Docker post-processors such as
[docker-push](/docs/post-processors/docker-push.html) to push the image to a
registry.

This is very similar to the
[docker-import](/docs/post-processors/docker-import.html) post-processor except
that this works with committed resources, rather than exported.

## Configuration

The configuration for this post-processor is extremely simple. At least a
repository is required.

-   `repository` (string) - The repository of the image.

-   `tag` (string) - The tag for the image. By default this is not set.

-   `force` (boolean) - If true, this post-processor forcibly tags the image
    even if the tag name already exists. Defaults to `false`.

## Example

An example is shown below, showing only the post-processor configuration:

``` {.javascript}
{
  "type": "docker-tag",
  "repository": "mitchellh/packer",

...@@ -45,9 +46,9 @@ An example is shown below, showing only the post-processor configuration:

}
```

This example would take the image created by the Docker builder and tag it into
the local Docker process with a name of `mitchellh/packer:0.7`.

Following this, you can use the
[docker-push](/docs/post-processors/docker-push.html) post-processor to push it
to a registry, if you want.
---
description: |
    The Packer Vagrant Cloud post-processor receives a Vagrant box from the
    `vagrant` post-processor and pushes it to Vagrant Cloud. Vagrant Cloud hosts
    and serves boxes to Vagrant, allowing you to version and distribute boxes to
    an organization in a simple way.
layout: docs
page_title: 'Vagrant Cloud Post-Processor'
...

# Vagrant Cloud Post-Processor

~> Vagrant Cloud has been superseded by Atlas. Please use the [Atlas
post-processor](/docs/post-processors/atlas.html) instead. Learn more about
[Atlas](https://atlas.hashicorp.com/).

Type: `vagrant-cloud`

The Packer Vagrant Cloud post-processor receives a Vagrant box from the
`vagrant` post-processor and pushes it to Vagrant Cloud. [Vagrant
Cloud](https://vagrantcloud.com) hosts and serves boxes to Vagrant, allowing you
to version and distribute boxes to an organization in a simple way.

You'll need to be familiar with Vagrant Cloud, have an upgraded account to
enable box hosting, and be distributing your box via the [shorthand
name](http://docs.vagrantup.com/v2/cli/box.html) configuration.

## Workflow

It's important to understand the workflow that using this post-processor
enforces in order to take full advantage of Vagrant and Vagrant Cloud.

The use of this post-processor assumes that you currently distribute, or plan to
distribute, boxes via Vagrant Cloud. It also assumes you create Vagrant boxes
and deliver them to your team in some fashion.

Here is an example workflow:

1.  You use Packer to build a Vagrant box for the `virtualbox` provider.
2.  The `vagrant-cloud` post-processor is configured to point to the box
    `hashicorp/foobar` on Vagrant Cloud via the `box_tag` configuration.
3.  The post-processor receives the box from the `vagrant` post-processor.
4.  It then creates the configured version, or verifies the existence of it, on
    Vagrant Cloud.
5.  A provider matching the name of the Vagrant provider is then created.
6.  The box is uploaded to Vagrant Cloud.
7.  The upload is verified.
8.  The version is released and available to users of the box.

## Configuration

The configuration allows you to specify the target box that you have access to
on Vagrant Cloud, as well as authentication and version information.

### Required:

-   `access_token` (string) - Your access token for the Vagrant Cloud API. This
    can be generated on your [tokens
    page](https://vagrantcloud.com/account/tokens).

-   `box_tag` (string) - The shorthand tag for your box that maps to Vagrant
    Cloud, i.e. `hashicorp/precise64` for `vagrantcloud.com/hashicorp/precise64`.

-   `version` (string) - The version number, typically incrementing a
    previous version. The version string is validated based on [Semantic
    Versioning](http://semver.org/). The string must match a pattern that could
    be semver, and doesn't validate that the version comes after your
    previous versions.

### Optional:

-   `no_release` (string) - If set to true, does not release the version on
    Vagrant Cloud, leaving it unreleased. You can manually release the version
    via the API or Web UI. Defaults to false.

-   `vagrant_cloud_url` (string) - Override the base URL for Vagrant Cloud. This
    is useful if you're using Vagrant Private Cloud in your own network.
    Defaults to `https://vagrantcloud.com/api/v1`.

-   `version_description` (string) - Optional Markdown text used as a
    full-length and in-depth description of the version, typically for denoting
    changes introduced.

-   `box_download_url` (string) - Optional URL for a self-hosted box. If this is
    set the box will not be uploaded to the Vagrant Cloud.

## Use with Vagrant Post-Processor

...@@ -84,7 +91,7 @@ An example configuration is below. Note the use of a doubly-nested array, which

ensures that the Vagrant Cloud post-processor is run after the Vagrant
post-processor.

``` {.javascript}
{
  "variables": {
    "version": "",
...
--- ---
layout: "docs" description: |
page_title: "Vagrant Post-Processor" The Packer Vagrant post-processor takes a build and converts the artifact into a
description: |- valid Vagrant box, if it can. This lets you use Packer to automatically create
The Packer Vagrant post-processor takes a build and converts the artifact into a valid Vagrant box, if it can. This lets you use Packer to automatically create arbitrarily complex Vagrant boxes, and is in fact how the official boxes distributed by Vagrant are created. arbitrarily complex Vagrant boxes, and is in fact how the official boxes
--- distributed by Vagrant are created.
layout: docs
page_title: 'Vagrant Post-Processor'
...
# Vagrant Post-Processor # Vagrant Post-Processor
Type: `vagrant` Type: `vagrant`
The Packer Vagrant post-processor takes a build and converts the artifact The Packer Vagrant post-processor takes a build and converts the artifact into a
into a valid [Vagrant](http://www.vagrantup.com) box, if it can. valid [Vagrant](http://www.vagrantup.com) box, if it can. This lets you use
This lets you use Packer to automatically create arbitrarily complex Packer to automatically create arbitrarily complex Vagrant boxes, and is in fact
Vagrant boxes, and is in fact how the official boxes distributed by how the official boxes distributed by Vagrant are created.
Vagrant are created.
If you've never used a post-processor before, please read the If you've never used a post-processor before, please read the documentation on
documentation on [using post-processors](/docs/templates/post-processors.html) [using post-processors](/docs/templates/post-processors.html) in templates. This
in templates. This knowledge will be expected for the remainder of knowledge will be expected for the remainder of this document.
this document.
Because Vagrant boxes are [provider-specific](http://docs.vagrantup.com/v2/boxes/format.html), Because Vagrant boxes are
the Vagrant post-processor is hardcoded to understand how to convert [provider-specific](http://docs.vagrantup.com/v2/boxes/format.html), the Vagrant
the artifacts of certain builders into proper boxes for their post-processor is hardcoded to understand how to convert the artifacts of
respective providers. certain builders into proper boxes for their respective providers.
Currently, the Vagrant post-processor can create boxes for the following Currently, the Vagrant post-processor can create boxes for the following
providers. providers.
* AWS - AWS
* DigitalOcean - DigitalOcean
* Hyper-V - Hyper-V
* Parallels - Parallels
* QEMU - QEMU
* VirtualBox - VirtualBox
* VMware - VMware
-> **Support for additional providers** is planned. If the Vagrant
post-processor doesn't support creating boxes for a provider you care about,
please help by contributing to Packer and adding support for it.
## Configuration ## Configuration
The simplest way to use the post-processor is to just enable it. No The simplest way to use the post-processor is to just enable it. No
configuration is required by default. This will mostly do what you expect configuration is required by default. This will mostly do what you expect and
and will build functioning boxes for many of the built-in builders of will build functioning boxes for many of the built-in builders of Packer.
Packer.
However, if you want to configure things a bit more, the post-processor does
However, if you want to configure things a bit more, the post-processor expose some configuration options. The available options are listed below, with
does expose some configuration options. The available options are listed more details about certain options in following sections.
below, with more details about certain options in following sections.
- `compression_level` (integer) - An integer representing the compression
* `compression_level` (integer) - An integer representing the level to use when creating the Vagrant box. Valid values range from 0 to 9,
compression level to use when creating the Vagrant box. Valid with 0 being no compression and 9 being the best compression. By default,
values range from 0 to 9, with 0 being no compression and 9 being compression is enabled at level 6.
the best compression. By default, compression is enabled at level 6.
- `include` (array of strings) - Paths to files to include in the Vagrant box.
* `include` (array of strings) - Paths to files to include in the These files will each be copied into the top level directory of the Vagrant
Vagrant box. These files will each be copied into the top level directory box (regardless of their paths). They can then be used from the Vagrantfile.
of the Vagrant box (regardless of their paths). They can then be used
from the Vagrantfile. - `keep_input_artifact` (boolean) - If set to true, do not delete the
`output_directory` on a successful build. Defaults to false.
* `keep_input_artifact` (boolean) - If set to true, do not delete the
`output_directory` on a successful build. Defaults to false. - `output` (string) - The full path to the box file that will be created by
this post-processor. This is a [configuration
* `output` (string) - The full path to the box file that will be created template](/docs/templates/configuration-templates.html). The variable
by this post-processor. This is a `Provider` is replaced by the Vagrant provider the box is for. The variable
[configuration template](/docs/templates/configuration-templates.html). `ArtifactId` is replaced by the ID of the input artifact. The variable
The variable `Provider` is replaced by the Vagrant provider the box is for. `BuildName` is replaced with the name of the build. By default, the value of
The variable `ArtifactId` is replaced by the ID of the input artifact. this config is `packer_{{.BuildName}}_{{.Provider}}.box`.
The variable `BuildName` is replaced with the name of the build.
By default, the value of this config is `packer_{{.BuildName}}_{{.Provider}}.box`. - `vagrantfile_template` (string) - Path to a template to use for the
Vagrantfile that is packaged with the box.
* `vagrantfile_template` (string) - Path to a template to use for the
Vagrantfile that is packaged with the box.
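As a sketch of how these options combine, the block below sets a compression
level, a custom output path, and an extra bundled file; the `builds/` prefix
and the included file path are illustrative, not defaults.

```javascript
{
  "type": "vagrant",
  "compression_level": 9,
  "output": "builds/{{.BuildName}}_{{.Provider}}.box",
  "include": ["files/box-metadata.json"]
}
```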
## Provider-Specific Overrides ## Provider-Specific Overrides
If you have a Packer template with multiple builder types within it, If you have a Packer template with multiple builder types within it, you may
you may want to configure the box creation for each type a little differently. want to configure the box creation for each type a little differently. For
For example, the contents of the Vagrantfile for a Vagrant box for AWS might example, the contents of the Vagrantfile for a Vagrant box for AWS might be
be different from the contents of the Vagrantfile you want for VMware. different from the contents of the Vagrantfile you want for VMware. The
The post-processor lets you do this. post-processor lets you do this.
Specify overrides within the `override` configuration by provider name: Specify overrides within the `override` configuration by provider name:
```javascript ``` {.javascript}
{ {
"type": "vagrant", "type": "vagrant",
"compression_level": 1, "compression_level": 1,
...@@ -97,18 +96,18 @@ Specify overrides within the `override` configuration by provider name: ...@@ -97,18 +96,18 @@ Specify overrides within the `override` configuration by provider name:
} }
``` ```
In the example above, the compression level will be set to 1 except for In the example above, the compression level will be set to 1 except for VMware,
VMware, where it will be set to 0. where it will be set to 0.
The available provider names are: `aws`, `digitalocean`, `virtualbox`, The available provider names are: `aws`, `digitalocean`, `virtualbox`, `vmware`,
`vmware`, and `parallels`. and `parallels`.
## Input Artifacts ## Input Artifacts
By default, Packer will delete the original input artifact, assuming By default, Packer will delete the original input artifact, assuming you only
you only want the final Vagrant box as the result. If you wish to keep the want the final Vagrant box as the result. If you wish to keep the input artifact
input artifact (the raw virtual machine, for example), then you must (the raw virtual machine, for example), then you must configure Packer to keep
configure Packer to keep it. it.
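For example, a minimal post-processor block that keeps the raw builder output
alongside the box might look like this:

```javascript
{
  "type": "vagrant",
  "keep_input_artifact": true
}
```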
Please see the [documentation on input artifacts](/docs/templates/post-processors.html#toc_2) Please see the [documentation on input
for more information. artifacts](/docs/templates/post-processors.html#toc_2) for more information.
--- ---
layout: "docs" description: |
page_title: "vSphere Post-Processor" The Packer vSphere post-processor takes an artifact from the VMware builder and
description: |- uploads it to a vSphere endpoint.
The Packer vSphere post-processor takes an artifact from the VMware builder and uploads it to a vSphere endpoint. layout: docs
--- page_title: 'vSphere Post-Processor'
...
# vSphere Post-Processor # vSphere Post-Processor
Type: `vsphere` Type: `vsphere`
The Packer vSphere post-processor takes an artifact from the VMware builder The Packer vSphere post-processor takes an artifact from the VMware builder and
and uploads it to a vSphere endpoint. uploads it to a vSphere endpoint.
## Configuration ## Configuration
...@@ -20,37 +21,36 @@ each category, the available configuration keys are alphabetized. ...@@ -20,37 +21,36 @@ each category, the available configuration keys are alphabetized.
Required: Required:
* `cluster` (string) - The cluster to upload the VM to. - `cluster` (string) - The cluster to upload the VM to.
* `datacenter` (string) - The name of the datacenter within vSphere to - `datacenter` (string) - The name of the datacenter within vSphere to add the
add the VM to. VM to.
* `datastore` (string) - The name of the datastore to store this VM. - `datastore` (string) - The name of the datastore to store this VM. This is
This is _not required_ if `resource_pool` is specified. *not required* if `resource_pool` is specified.
* `host` (string) - The vSphere host that will be contacted to perform - `host` (string) - The vSphere host that will be contacted to perform the
the VM upload. VM upload.
* `password` (string) - Password to use to authenticate to the vSphere - `password` (string) - Password to use to authenticate to the
endpoint. vSphere endpoint.
* `resource_pool` (string) - The resource pool to upload the VM to. - `resource_pool` (string) - The resource pool to upload the VM to. This is
This is _not required_. *not required*.
* `username` (string) - The username to use to authenticate to the vSphere - `username` (string) - The username to use to authenticate to the
endpoint. vSphere endpoint.
* `vm_name` (string) - The name of the VM once it is uploaded. - `vm_name` (string) - The name of the VM once it is uploaded.
Optional: Optional:
* `disk_mode` (string) - Target disk format. See `ovftool` manual for - `disk_mode` (string) - Target disk format. See `ovftool` manual for
available options. By default, "thick" will be used. available options. By default, "thick" will be used.
* `insecure` (boolean) - Whether or not the connection to vSphere can be done - `insecure` (boolean) - Whether or not the connection to vSphere can be done
over an insecure connection. By default this is false. over an insecure connection. By default this is false.
* `vm_folder` (string) - The folder within the datastore to store the VM. - `vm_folder` (string) - The folder within the datastore to store the VM.
* `vm_network` (string) - The name of the VM network this VM will be - `vm_network` (string) - The name of the VM network this VM will be added to.
added to.
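A complete post-processor block might look like the following sketch; the
endpoint hostname, credentials, datacenter, cluster, datastore, and VM name are
all placeholders.

```javascript
{
  "type": "vsphere",
  "host": "vcenter.example.com",
  "username": "packer",
  "password": "{{user `vsphere_password`}}",
  "datacenter": "dc-01",
  "cluster": "cluster-01",
  "datastore": "datastore-01",
  "vm_name": "packer-build",
  "insecure": true
}
```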
--- ---
layout: "docs" description: |
page_title: "Ansible (Local) Provisioner" The `ansible-local` Packer provisioner configures Ansible to run on the machine
description: |- by Packer from local Playbook and Role files. Playbooks and Roles can be
The `ansible-local` Packer provisioner configures Ansible to run on the machine by Packer from local Playbook and Role files. Playbooks and Roles can be uploaded from your local machine to the remote machine. Ansible is run in local mode via the `ansible-playbook` command. uploaded from your local machine to the remote machine. Ansible is run in local
--- mode via the `ansible-playbook` command.
layout: docs
page_title: 'Ansible (Local) Provisioner'
...
# Ansible Local Provisioner # Ansible Local Provisioner
Type: `ansible-local` Type: `ansible-local`
The `ansible-local` Packer provisioner configures Ansible to run on the machine by The `ansible-local` Packer provisioner configures Ansible to run on the machine
Packer from local Playbook and Role files. Playbooks and Roles can be uploaded by Packer from local Playbook and Role files. Playbooks and Roles can be
from your local machine to the remote machine. Ansible is run in [local mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the `ansible-playbook` command. uploaded from your local machine to the remote machine. Ansible is run in [local
mode](http://docs.ansible.com/playbooks_delegation.html#local-playbooks) via the
`ansible-playbook` command.
## Basic Example ## Basic Example
The example below is fully functional. The example below is fully functional.
```javascript ``` {.javascript}
{ {
"type": "ansible-local", "type": "ansible-local",
"playbook_file": "local.yml" "playbook_file": "local.yml"
...@@ -30,81 +35,70 @@ The reference of available configuration options is listed below. ...@@ -30,81 +35,70 @@ The reference of available configuration options is listed below.
Required: Required:
* `playbook_file` (string) - The playbook file to be executed by ansible. - `playbook_file` (string) - The playbook file to be executed by ansible. This
This file must exist on your local system and will be uploaded to the file must exist on your local system and will be uploaded to the
remote machine. remote machine.
Optional: Optional:
* `command` (string) - The command to invoke ansible. Defaults to "ansible-playbook". - `command` (string) - The command to invoke ansible. Defaults
to "ansible-playbook".
* `extra_arguments` (array of strings) - An array of extra arguments to pass to the - `extra_arguments` (array of strings) - An array of extra arguments to pass
ansible command. By default, this is empty. to the ansible command. By default, this is empty.
* `inventory_groups` (string) - A comma-separated list of groups to which - `inventory_groups` (string) - A comma-separated list of groups to which
packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2` packer will assign the host `127.0.0.1`. A value of `my_group_1,my_group_2`
will generate an Ansible inventory like:

```text
[my_group_1]
127.0.0.1
[my_group_2]
127.0.0.1
```
* `inventory_file` (string) - The inventory file to be used by ansible. - `inventory_file` (string) - The inventory file to be used by ansible. This
This file must exist on your local system and will be uploaded to the file must exist on your local system and will be uploaded to the
remote machine. remote machine.
When using an inventory file, it's also required to `--limit` the hosts to the
specified host you're building. The `--limit` argument can be provided in the
`extra_arguments` option; see the example after this list of options.
An example inventory file may look like:

```text
[chi-dbservers]
db-01 ansible_connection=local
db-02 ansible_connection=local

[chi-appservers]
app-01 ansible_connection=local
app-02 ansible_connection=local

[chi:children]
chi-dbservers
chi-appservers

[dbservers:children]
chi-dbservers

[appservers:children]
chi-appservers
```
* `playbook_dir` (string) - a path to the complete ansible directory - `playbook_dir` (string) - a path to the complete ansible directory structure
structure on your local system to be copied to the remote machine on your local system to be copied to the remote machine as the
as the `staging_directory` before all other files and directories. `staging_directory` before all other files and directories.
* `playbook_paths` (array of strings) - An array of paths to playbook files on - `playbook_paths` (array of strings) - An array of paths to playbook files on
your local system. These will be uploaded to the remote machine under your local system. These will be uploaded to the remote machine under
`staging_directory`/playbooks. By default, this is empty. `staging_directory`/playbooks. By default, this is empty.
* `group_vars` (string) - a path to the directory containing ansible - `group_vars` (string) - a path to the directory containing ansible group
group variables on your local system to be copied to the variables on your local system to be copied to the remote machine. By
remote machine. By default, this is empty. default, this is empty.
* `host_vars` (string) - a path to the directory containing ansible - `host_vars` (string) - a path to the directory containing ansible host
host variables on your local system to be copied to the variables on your local system to be copied to the remote machine. By
remote machine. By default, this is empty. default, this is empty.
* `role_paths` (array of strings) - An array of paths to role directories on - `role_paths` (array of strings) - An array of paths to role directories on
your local system. These will be uploaded to the remote machine under your local system. These will be uploaded to the remote machine under
`staging_directory`/roles. By default, this is empty. `staging_directory`/roles. By default, this is empty.
* `staging_directory` (string) - The directory where all the configuration of - `staging_directory` (string) - The directory where all the configuration of
Ansible by Packer will be placed. By default this is "/tmp/packer-provisioner-ansible-local". Ansible by Packer will be placed. By default this
This directory doesn't need to exist but must have proper permissions so that is "/tmp/packer-provisioner-ansible-local". This directory doesn't need to
the SSH user that Packer uses is able to create directories and write into exist but must have proper permissions so that the SSH user that Packer uses
this folder. If the permissions are not correct, use a shell provisioner prior is able to create directories and write into this folder. If the permissions
to this to configure it properly. are not correct, use a shell provisioner prior to this to configure
it properly.
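As an illustration of the `inventory_file` and `--limit` combination described
above, a provisioner block might look like this sketch; the playbook name,
inventory file name, and host name are hypothetical.

```javascript
{
  "type": "ansible-local",
  "playbook_file": "site.yml",
  "inventory_file": "inventory",
  "extra_arguments": ["--limit", "db-01"]
}
```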
--- ---
layout: "docs" description: |
page_title: "Chef-Client Provisioner" The Chef Client Packer provisioner installs and configures software on machines
description: |- built by Packer using chef-client. Packer configures a Chef client to talk to a
The Chef Client Packer provisioner installs and configures software on machines built by Packer using chef-client. Packer configures a Chef client to talk to a remote Chef Server to provision the machine. remote Chef Server to provision the machine.
--- layout: docs
page_title: 'Chef-Client Provisioner'
...
# Chef Client Provisioner # Chef Client Provisioner
Type: `chef-client` Type: `chef-client`
The Chef Client Packer provisioner installs and configures software on machines built The Chef Client Packer provisioner installs and configures software on machines
by Packer using [chef-client](http://docs.opscode.com/chef_client.html). built by Packer using [chef-client](http://docs.opscode.com/chef_client.html).
Packer configures a Chef client to talk to a remote Chef Server to Packer configures a Chef client to talk to a remote Chef Server to provision the
provision the machine. machine.
The provisioner will even install Chef onto your machine if it isn't already The provisioner will even install Chef onto your machine if it isn't already
installed, using the official Chef installers provided by Opscode. installed, using the official Chef installers provided by Opscode.
## Basic Example ## Basic Example
The example below is fully functional. It will install Chef onto the The example below is fully functional. It will install Chef onto the remote
remote machine and run Chef client. machine and run Chef client.
```javascript ``` {.javascript}
{ {
"type": "chef-client", "type": "chef-client",
"server_url": "https://mychefserver.com/" "server_url": "https://mychefserver.com/"
} }
``` ```
Note: to properly clean up the Chef node and client, the machine on which
Packer is running must have knife on the path and configured globally, i.e.,
`~/.chef/knife.rb` must be present and configured for the target Chef server.
## Configuration Reference ## Configuration Reference
The reference of available configuration options is listed below. No The reference of available configuration options is listed below. No
configuration is actually required. configuration is actually required.
* `chef_environment` (string) - The name of the chef_environment sent to the - `chef_environment` (string) - The name of the chef\_environment sent to the
Chef server. By default this is empty and will not use an environment. Chef server. By default this is empty and will not use an environment.
* `config_template` (string) - Path to a template that will be used for - `config_template` (string) - Path to a template that will be used for the
the Chef configuration file. By default Packer only sets configuration Chef configuration file. By default Packer only sets configuration it needs
it needs to match the settings set in the provisioner configuration. If to match the settings set in the provisioner configuration. If you need to
you need to set configurations that the Packer provisioner doesn't support, set configurations that the Packer provisioner doesn't support, then you
then you should use a custom configuration template. See the dedicated should use a custom configuration template. See the dedicated "Chef
"Chef Configuration" section below for more details. Configuration" section below for more details.
* `execute_command` (string) - The command used to execute Chef. This has - `execute_command` (string) - The command used to execute Chef. This has
various [configuration template variables](/docs/templates/configuration-templates.html) various [configuration template
available. See below for more information. variables](/docs/templates/configuration-templates.html) available. See
below for more information.
* `install_command` (string) - The command used to install Chef. This has - `install_command` (string) - The command used to install Chef. This has
various [configuration template variables](/docs/templates/configuration-templates.html) various [configuration template
available. See below for more information. variables](/docs/templates/configuration-templates.html) available. See
below for more information.
* `json` (object) - An arbitrary mapping of JSON that will be available as - `json` (object) - An arbitrary mapping of JSON that will be available as
node attributes while running Chef. node attributes while running Chef.
* `node_name` (string) - The name of the node to register with the Chef - `node_name` (string) - The name of the node to register with the
Server. This is optional and by default is packer-{{uuid}}. Chef Server. This is optional and by default is packer-{{uuid}}.
* `prevent_sudo` (boolean) - By default, the configured commands that are - `prevent_sudo` (boolean) - By default, the configured commands that are
executed to install and run Chef are executed with `sudo`. If this is true, executed to install and run Chef are executed with `sudo`. If this is true,
then the sudo will be omitted. then the sudo will be omitted.
* `run_list` (array of strings) - The [run list](http://docs.opscode.com/essentials_node_object_run_lists.html) - `run_list` (array of strings) - The [run
for Chef. By default this is empty, and will use the run list sent list](http://docs.opscode.com/essentials_node_object_run_lists.html)
down by the Chef Server. for Chef. By default this is empty, and will use the run list sent down by
the Chef Server.
* `server_url` (string) - The URL to the Chef server. This is required. - `server_url` (string) - The URL to the Chef server. This is required.
* `skip_clean_client` (boolean) - If true, Packer won't remove the client - `skip_clean_client` (boolean) - If true, Packer won't remove the client from
from the Chef server after it is done running. By default, this is false. the Chef server after it is done running. By default, this is false.
* `skip_clean_node` (boolean) - If true, Packer won't remove the node - `skip_clean_node` (boolean) - If true, Packer won't remove the node from the
from the Chef server after it is done running. By default, this is false. Chef server after it is done running. By default, this is false.
* `skip_install` (boolean) - If true, Chef will not automatically be installed - `skip_install` (boolean) - If true, Chef will not automatically be installed
on the machine using the Opscode omnibus installers. on the machine using the Opscode omnibus installers.
* `staging_directory` (string) - This is the directory where all the configuration - `staging_directory` (string) - This is the directory where all the
of Chef by Packer will be placed. By default this is "/tmp/packer-chef-client". configuration of Chef by Packer will be placed. By default this
This directory doesn't need to exist but must have proper permissions so that is "/tmp/packer-chef-client". This directory doesn't need to exist but must
the SSH user that Packer uses is able to create directories and write into have proper permissions so that the SSH user that Packer uses is able to
this folder. If the permissions are not correct, use a shell provisioner create directories and write into this folder. If the permissions are not
prior to this to configure it properly. correct, use a shell provisioner prior to this to configure it properly.
* `client_key` (string) - Path to client key. If not set, this defaults to a file - `client_key` (string) - Path to client key. If not set, this defaults to a
named client.pem in `staging_directory`. file named client.pem in `staging_directory`.
* `validation_client_name` (string) - Name of the validation client. If - `validation_client_name` (string) - Name of the validation client. If not
not set, this won't be set in the configuration and the default that Chef set, this won't be set in the configuration and the default that Chef uses
uses will be used. will be used.
* `validation_key_path` (string) - Path to the validation key for communicating - `validation_key_path` (string) - Path to the validation key for
with the Chef Server. This will be uploaded to the remote machine. If this communicating with the Chef Server. This will be uploaded to the
is NOT set, then it is your responsibility via other means (shell provisioner, remote machine. If this is NOT set, then it is your responsibility via other
etc.) to get a validation key to where Chef expects it. means (shell provisioner, etc.) to get a validation key to where Chef
expects it.
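Putting a few of these options together, a more complete block might look like
the following sketch; the run list, validation key path, client name, and node
name are illustrative.

```javascript
{
  "type": "chef-client",
  "server_url": "https://mychefserver.com/",
  "run_list": ["recipe[base]", "recipe[app]"],
  "validation_key_path": "~/.chef/validation.pem",
  "validation_client_name": "chef-validator",
  "node_name": "packer-build-01"
}
```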
## Chef Configuration ## Chef Configuration
By default, Packer uses a simple Chef configuration file in order to set By default, Packer uses a simple Chef configuration file in order to set the
the options specified for the provisioner. But Chef is a complex tool that options specified for the provisioner. But Chef is a complex tool that supports
supports many configuration options. Packer allows you to specify a custom many configuration options. Packer allows you to specify a custom configuration
configuration template if you'd like to set custom configurations. template if you'd like to set custom configurations.
The default value for the configuration template is: The default value for the configuration template is:
```liquid ``` {.liquid}
log_level :info log_level :info
log_location STDOUT log_location STDOUT
chef_server_url "{{.ServerUrl}}" chef_server_url "{{.ServerUrl}}"
...@@ -126,42 +132,42 @@ node_name "{{.NodeName}}" ...@@ -126,42 +132,42 @@ node_name "{{.NodeName}}"
{{end}} {{end}}
``` ```
This template is a [configuration template](/docs/templates/configuration-templates.html) This template is a [configuration
and has a set of variables available to use: template](/docs/templates/configuration-templates.html) and has a set of
variables available to use:
* `NodeName` - The node name set in the configuration. - `NodeName` - The node name set in the configuration.
* `ServerUrl` - The URL of the Chef Server set in the configuration. - `ServerUrl` - The URL of the Chef Server set in the configuration.
* `ValidationKeyPath` - Path to the validation key, if it is set. - `ValidationKeyPath` - Path to the validation key, if it is set.
## Execute Command ## Execute Command
By default, Packer uses the following command (broken across multiple lines By default, Packer uses the following command (broken across multiple lines for
for readability) to execute Chef: readability) to execute Chef:
```liquid ``` {.liquid}
{{if .Sudo}}sudo {{end}}chef-client \ {{if .Sudo}}sudo {{end}}chef-client \
--no-color \ --no-color \
-c {{.ConfigPath}} \ -c {{.ConfigPath}} \
-j {{.JsonPath}} -j {{.JsonPath}}
``` ```
This command can be customized using the `execute_command` configuration. This command can be customized using the `execute_command` configuration. As you
As you can see from the default value above, the value of this configuration can see from the default value above, the value of this configuration can
can contain various template variables, defined below: contain various template variables, defined below:
- `ConfigPath` - The path to the Chef configuration file.
- `JsonPath` - The path to the JSON attributes file for the node.
- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
  value of the `prevent_sudo` configuration.
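For example, to pass an extra flag to `chef-client` while keeping the default
behavior, the command could be overridden as in the sketch below; the added
`-l debug` log-level flag is only an illustration.

```javascript
{
  "type": "chef-client",
  "server_url": "https://mychefserver.com/",
  "execute_command": "{{if .Sudo}}sudo {{end}}chef-client --no-color -c {{.ConfigPath}} -j {{.JsonPath}} -l debug"
}
```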
## Install Command ## Install Command
By default, Packer uses the following command (broken across multiple lines By default, Packer uses the following command (broken across multiple lines for
for readability) to install Chef. This command can be customized if you want readability) to install Chef. This command can be customized if you want to
to install Chef in another way. install Chef in another way.
```text ``` {.text}
curl -L https://www.opscode.com/chef/install.sh | \ curl -L https://www.opscode.com/chef/install.sh | \
{{if .Sudo}}sudo{{end}} bash {{if .Sudo}}sudo{{end}} bash
``` ```
...@@ -170,9 +176,8 @@ This command can be customized using the `install_command` configuration. ...@@ -170,9 +176,8 @@ This command can be customized using the `install_command` configuration.
## Folder Permissions ## Folder Permissions
!> The `chef-client` provisioner will chmod the directory with your Chef keys
to 777. This is to ensure that Packer can upload and make use of that
directory. However, once the machine is created, you usually don't want to keep
these directories with those permissions. To change the permissions on the
directories, append a shell provisioner after Chef to modify them.
--- ---
layout: "docs" description: |
page_title: "Chef-Solo Provisioner" The Chef solo Packer provisioner installs and configures software on machines
description: |- built by Packer using chef-solo. Cookbooks can be uploaded from your local
The Chef solo Packer provisioner installs and configures software on machines built by Packer using chef-solo. Cookbooks can be uploaded from your local machine to the remote machine or remote paths can be used. machine to the remote machine or remote paths can be used.
--- layout: docs
page_title: 'Chef-Solo Provisioner'
...
# Chef Solo Provisioner # Chef Solo Provisioner
Type: `chef-solo` Type: `chef-solo`
The Chef solo Packer provisioner installs and configures software on machines built The Chef solo Packer provisioner installs and configures software on machines
by Packer using [chef-solo](https://docs.chef.io/chef_solo.html). Cookbooks built by Packer using [chef-solo](https://docs.chef.io/chef_solo.html).
can be uploaded from your local machine to the remote machine or remote paths Cookbooks can be uploaded from your local machine to the remote machine or
can be used. remote paths can be used.
The provisioner will even install Chef onto your machine if it isn't already The provisioner will even install Chef onto your machine if it isn't already
installed, using the official Chef installers provided by Chef Inc. installed, using the official Chef installers provided by Chef Inc.
## Basic Example ## Basic Example
The example below is fully functional and expects cookbooks in the The example below is fully functional and expects cookbooks in the "cookbooks"
"cookbooks" directory relative to your working directory. directory relative to your working directory.
```javascript ``` {.javascript}
{ {
"type": "chef-solo", "type": "chef-solo",
"cookbook_paths": ["cookbooks"] "cookbook_paths": ["cookbooks"]
...@@ -34,124 +36,127 @@ The example below is fully functional and expects cookbooks in the ...@@ -34,124 +36,127 @@ The example below is fully functional and expects cookbooks in the
The reference of available configuration options is listed below. No The reference of available configuration options is listed below. No
configuration is actually required, but at least `run_list` is recommended. configuration is actually required, but at least `run_list` is recommended.
* `chef_environment` (string) - The name of the `chef_environment` sent to the - `chef_environment` (string) - The name of the `chef_environment` sent to the
Chef server. By default this is empty and will not use an environment Chef server. By default this is empty and will not use an environment
* `config_template` (string) - Path to a template that will be used for - `config_template` (string) - Path to a template that will be used for the
the Chef configuration file. By default Packer only sets configuration Chef configuration file. By default Packer only sets configuration it needs
it needs to match the settings set in the provisioner configuration. If to match the settings set in the provisioner configuration. If you need to
you need to set configurations that the Packer provisioner doesn't support, set configurations that the Packer provisioner doesn't support, then you
then you should use a custom configuration template. See the dedicated should use a custom configuration template. See the dedicated "Chef
"Chef Configuration" section below for more details. Configuration" section below for more details.
* `cookbook_paths` (array of strings) - This is an array of paths to - `cookbook_paths` (array of strings) - This is an array of paths to
"cookbooks" directories on your local filesystem. These will be uploaded "cookbooks" directories on your local filesystem. These will be uploaded to
to the remote machine in the directory specified by the `staging_directory`. the remote machine in the directory specified by the `staging_directory`. By
By default, this is empty. default, this is empty.
* `data_bags_path` (string) - The path to the "data\_bags" directory on your local filesystem. - `data_bags_path` (string) - The path to the "data\_bags" directory on your
These will be uploaded to the remote machine in the directory specified by the local filesystem. These will be uploaded to the remote machine in the
`staging_directory`. By default, this is empty. directory specified by the `staging_directory`. By default, this is empty.
* `encrypted_data_bag_secret_path` (string) - The path to the file containing - `encrypted_data_bag_secret_path` (string) - The path to the file containing
the secret for encrypted data bags. By default, this is empty, so no the secret for encrypted data bags. By default, this is empty, so no secret
secret will be available. will be available.
* `environments_path` (string) - The path to the "environments" directory on your local filesystem. - `environments_path` (string) - The path to the "environments" directory on
These will be uploaded to the remote machine in the directory specified by the your local filesystem. These will be uploaded to the remote machine in the
`staging_directory`. By default, this is empty. directory specified by the `staging_directory`. By default, this is empty.
* `execute_command` (string) - The command used to execute Chef. This has - `execute_command` (string) - The command used to execute Chef. This has
various [configuration template variables](/docs/templates/configuration-templates.html) various [configuration template
available. See below for more information. variables](/docs/templates/configuration-templates.html) available. See
below for more information.
* `install_command` (string) - The command used to install Chef. This has
various [configuration template variables](/docs/templates/configuration-templates.html) - `install_command` (string) - The command used to install Chef. This has
available. See below for more information. various [configuration template
variables](/docs/templates/configuration-templates.html) available. See
* `json` (object) - An arbitrary mapping of JSON that will be available as below for more information.
node attributes while running Chef.
- `json` (object) - An arbitrary mapping of JSON that will be available as
* `prevent_sudo` (boolean) - By default, the configured commands that are node attributes while running Chef.
executed to install and run Chef are executed with `sudo`. If this is true,
then the sudo will be omitted. - `prevent_sudo` (boolean) - By default, the configured commands that are
executed to install and run Chef are executed with `sudo`. If this is true,
* `remote_cookbook_paths` (array of strings) - A list of paths on the remote then the sudo will be omitted.
machine where cookbooks will already exist. These may exist from a previous
provisioner or step. If specified, Chef will be configured to look for - `remote_cookbook_paths` (array of strings) - A list of paths on the remote
cookbooks here. By default, this is empty. machine where cookbooks will already exist. These may exist from a previous
provisioner or step. If specified, Chef will be configured to look for
* `roles_path` (string) - The path to the "roles" directory on your local filesystem. cookbooks here. By default, this is empty.
These will be uploaded to the remote machine in the directory specified by the
`staging_directory`. By default, this is empty. - `roles_path` (string) - The path to the "roles" directory on your
local filesystem. These will be uploaded to the remote machine in the
* `run_list` (array of strings) - The [run list](https://docs.chef.io/run_lists.html) directory specified by the `staging_directory`. By default, this is empty.
for Chef. By default this is empty.
- `run_list` (array of strings) - The [run
* `skip_install` (boolean) - If true, Chef will not automatically be installed list](https://docs.chef.io/run_lists.html) for Chef. By default this
on the machine using the Chef omnibus installers. is empty.
* `staging_directory` (string) - This is the directory where all the configuration - `skip_install` (boolean) - If true, Chef will not automatically be installed
of Chef by Packer will be placed. By default this is "/tmp/packer-chef-solo". on the machine using the Chef omnibus installers.
This directory doesn't need to exist but must have proper permissions so that
the SSH user that Packer uses is able to create directories and write into - `staging_directory` (string) - This is the directory where all the
this folder. If the permissions are not correct, use a shell provisioner configuration of Chef by Packer will be placed. By default this
prior to this to configure it properly. is "/tmp/packer-chef-solo". This directory doesn't need to exist but must
have proper permissions so that the SSH user that Packer uses is able to
create directories and write into this folder. If the permissions are not
correct, use a shell provisioner prior to this to configure it properly.
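A slightly fuller example than the one above, combining cookbooks, data bags,
roles, and a run list, might look like this sketch; the directory names and
recipe name are placeholders.

```javascript
{
  "type": "chef-solo",
  "cookbook_paths": ["cookbooks"],
  "data_bags_path": "data_bags",
  "roles_path": "roles",
  "run_list": ["recipe[base]"]
}
```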
## Chef Configuration ## Chef Configuration
By default, Packer uses a simple Chef configuration file in order to set By default, Packer uses a simple Chef configuration file in order to set the
the options specified for the provisioner. But Chef is a complex tool that options specified for the provisioner. But Chef is a complex tool that supports
supports many configuration options. Packer allows you to specify a custom many configuration options. Packer allows you to specify a custom configuration
configuration template if you'd like to set custom configurations. template if you'd like to set custom configurations.
The default value for the configuration template is: The default value for the configuration template is:
```liquid ``` {.liquid}
cookbook_path [{{.CookbookPaths}}] cookbook_path [{{.CookbookPaths}}]
``` ```
This template is a [configuration template](/docs/templates/configuration-templates.html) This template is a [configuration
and has a set of variables available to use: template](/docs/templates/configuration-templates.html) and has a set of
variables available to use:
* `ChefEnvironment` - The current enabled environment. Only non-empty - `ChefEnvironment` - The current enabled environment. Only non-empty if the
if the environment path is set. environment path is set.
- `CookbookPaths` - The set of cookbook paths, ready to be embedded directly
  into a Ruby array to configure Chef.
* `DataBagsPath` is the path to the data bags folder. - `DataBagsPath` is the path to the data bags folder.
* `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret - `EncryptedDataBagSecretPath` - The path to the encrypted data bag secret
* `EnvironmentsPath` - The path to the environments folder. - `EnvironmentsPath` - The path to the environments folder.
* `RolesPath` - The path to the roles folder. - `RolesPath` - The path to the roles folder.
## Execute Command ## Execute Command
By default, Packer uses the following command (broken across multiple lines By default, Packer uses the following command (broken across multiple lines for
for readability) to execute Chef: readability) to execute Chef:
```liquid ``` {.liquid}
{{if .Sudo}}sudo {{end}}chef-solo \ {{if .Sudo}}sudo {{end}}chef-solo \
--no-color \ --no-color \
-c {{.ConfigPath}} \ -c {{.ConfigPath}} \
-j {{.JsonPath}} -j {{.JsonPath}}
``` ```
This command can be customized using the `execute_command` configuration. This command can be customized using the `execute_command` configuration. As you
As you can see from the default value above, the value of this configuration can see from the default value above, the value of this configuration can
can contain various template variables, defined below: contain various template variables, defined below:
- `ConfigPath` - The path to the Chef configuration file.
- `JsonPath` - The path to the JSON attributes file for the node.
- `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
  value of the `prevent_sudo` configuration.
## Install Command ## Install Command
By default, Packer uses the following command (broken across multiple lines By default, Packer uses the following command (broken across multiple lines for
for readability) to install Chef. This command can be customized if you want readability) to install Chef. This command can be customized if you want to
to install Chef in another way. install Chef in another way.
```text ``` {.text}
curl -L https://www.chef.io/chef/install.sh | \ curl -L https://www.chef.io/chef/install.sh | \
{{if .Sudo}}sudo{{end}} bash {{if .Sudo}}sudo{{end}} bash
``` ```
......
--- ---
layout: "docs" description: |
page_title: "Custom Provisioner" Packer is extensible, allowing you to write new provisioners without having to
description: |- modify the core source code of Packer itself. Documentation for creating new
Packer is extensible, allowing you to write new provisioners without having to modify the core source code of Packer itself. Documentation for creating new provisioners is covered in the custom provisioners page of the Packer plugin section. provisioners is covered in the custom provisioners page of the Packer plugin
--- section.
layout: docs
page_title: Custom Provisioner
...
# Custom Provisioner # Custom Provisioner
Packer is extensible, allowing you to write new provisioners without having to Packer is extensible, allowing you to write new provisioners without having to
modify the core source code of Packer itself. Documentation for creating modify the core source code of Packer itself. Documentation for creating new
new provisioners is covered in the [custom provisioners](/docs/extend/provisioner.html) provisioners is covered in the [custom
page of the Packer plugin section. provisioners](/docs/extend/provisioner.html) page of the Packer plugin section.
--- ---
layout: "docs" description: |
page_title: "File Provisioner" The file Packer provisioner uploads files to machines built by Packer. The
description: |- recommended usage of the file provisioner is to use it to upload files, and then
The file Packer provisioner uploads files to machines built by Packer. The recommended usage of the file provisioner is to use it to upload files, and then use shell provisioner to move them to the proper place, set permissions, etc. use shell provisioner to move them to the proper place, set permissions, etc.
--- layout: docs
page_title: File Provisioner
...
# File Provisioner # File Provisioner
Type: `file` Type: `file`
The file Packer provisioner uploads files to machines built by Packer. The The file Packer provisioner uploads files to machines built by Packer. The
recommended usage of the file provisioner is to use it to upload files, recommended usage of the file provisioner is to use it to upload files, and then
and then use [shell provisioner](/docs/provisioners/shell.html) to move use [shell provisioner](/docs/provisioners/shell.html) to move them to the
them to the proper place, set permissions, etc. proper place, set permissions, etc.
The file provisioner can upload both single files and complete directories. The file provisioner can upload both single files and complete directories.
## Basic Example ## Basic Example
```javascript ``` {.javascript}
{ {
"type": "file", "type": "file",
"source": "app.tar.gz", "source": "app.tar.gz",
...@@ -30,42 +32,42 @@ The file provisioner can upload both single files and complete directories. ...@@ -30,42 +32,42 @@ The file provisioner can upload both single files and complete directories.
The available configuration options are listed below. All elements are required. The available configuration options are listed below. All elements are required.
* `source` (string) - The path to a local file or directory to upload to the - `source` (string) - The path to a local file or directory to upload to
machine. The path can be absolute or relative. If it is relative, it is the machine. The path can be absolute or relative. If it is relative, it is
relative to the working directory when Packer is executed. If this is a relative to the working directory when Packer is executed. If this is a
directory, the existence of a trailing slash is important. Read below on directory, the existence of a trailing slash is important. Read below on
uploading directories. uploading directories.
* `destination` (string) - The path where the file will be uploaded to in the - `destination` (string) - The path where the file will be uploaded to in
machine. This value must be a writable location and any parent directories the machine. This value must be a writable location and any parent
must already exist. directories must already exist.
- `direction` (string) - The direction of the file transfer. This defaults to
  "upload". If it is set to "download", then the file "source" in the machine
  will be downloaded locally to "destination".
## Directory Uploads ## Directory Uploads
The file provisioner is also able to upload a complete directory to the The file provisioner is also able to upload a complete directory to the remote
remote machine. When uploading a directory, there are a few important things machine. When uploading a directory, there are a few important things you should
you should know. know.
First, the destination directory must already exist. If you need to First, the destination directory must already exist. If you need to create it,
create it, use a shell provisioner just prior to the file provisioner use a shell provisioner just prior to the file provisioner in order to create
in order to create the directory. the directory.
Next, the existence of a trailing slash on the source path will determine Next, the existence of a trailing slash on the source path will determine
whether the directory name will be embedded within the destination, or whether the directory name will be embedded within the destination, or whether
whether the destination will be created. An example explains this best: the destination will be created. An example explains this best:
If the source is `/foo` (no trailing slash), and the destination is If the source is `/foo` (no trailing slash), and the destination is `/tmp`, then
`/tmp`, then the contents of `/foo` on the local machine will be uploaded the contents of `/foo` on the local machine will be uploaded to `/tmp/foo` on
to `/tmp/foo` on the remote machine. The `foo` directory on the remote the remote machine. The `foo` directory on the remote machine will be created by
machine will be created by Packer. Packer.
If the source, however, is `/foo/` (a trailing slash is present), and If the source, however, is `/foo/` (a trailing slash is present), and the
the destination is `/tmp`, then the contents of `/foo` will be uploaded destination is `/tmp`, then the contents of `/foo` will be uploaded into `/tmp`
into `/tmp` directly. directly.
This behavior was adopted from the standard behavior of rsync. Note that This behavior was adopted from the standard behavior of rsync. Note that under
under the covers, rsync may or may not be used. the covers, rsync may or may not be used.
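To make the trailing-slash distinction concrete, the following sketch uploads
the contents of a local `app/` directory into an existing `/opt/app` directory
on the remote machine; both paths are hypothetical.

```javascript
{
  "type": "file",
  "source": "app/",
  "destination": "/opt/app"
}
```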
--- ---
layout: "docs" description: |
page_title: "PowerShell Provisioner" The shell Packer provisioner provisions machines built by Packer using shell
description: |- scripts. Shell provisioning is the easiest way to get software installed and
The shell Packer provisioner provisions machines built by Packer using shell scripts. Shell provisioning is the easiest way to get software installed and configured on a machine. configured on a machine.
--- layout: docs
page_title: PowerShell Provisioner
...
# PowerShell Provisioner # PowerShell Provisioner
...@@ -16,7 +18,7 @@ It assumes that the communicator in use is WinRM. ...@@ -16,7 +18,7 @@ It assumes that the communicator in use is WinRM.
The example below is fully functional. The example below is fully functional.
```javascript ``` {.javascript}
{ {
"type": "powershell", "type": "powershell",
"inline": ["dir c:\\"] "inline": ["dir c:\\"]
...@@ -28,55 +30,55 @@ The example below is fully functional. ...@@ -28,55 +30,55 @@ The example below is fully functional.
The reference of available configuration options is listed below. The only
required element is either "inline" or "script". Every other option is optional.

Exactly *one* of the following is required:

-   `inline` (array of strings) - This is an array of commands to execute. The
    commands are concatenated by newlines and turned into a single file, so they
    are all executed within the same context. This allows you to change
    directories in one command and use something in the directory in the next
    and so on. Inline scripts are the easiest way to pull off simple tasks
    within the machine.

-   `script` (string) - The path to a script to upload and execute in
    the machine. This path can be absolute or relative. If it is relative, it is
    relative to the working directory when Packer is executed.

-   `scripts` (array of strings) - An array of scripts to execute. The scripts
    will be uploaded and executed in the order specified. Each script is
    executed in isolation, so state such as variables from one script won't
    carry on to the next.

Optional parameters:

-   `binary` (boolean) - If true, specifies that the script(s) are binary files,
    and Packer should therefore not convert Windows line endings to Unix line
    endings (if there are any). By default this is false.

-   `environment_vars` (array of strings) - An array of key/value pairs to
    inject prior to the `execute_command`. The format should be `key=value`.
    Packer injects some environmental variables by default into the environment,
    as well, which are covered in the section below.

-   `execute_command` (string) - The command to use to execute the script. By
    default this is `powershell "& { {{.Vars}}{{.Path}}; exit $LastExitCode}"`.
    The value of this is treated as a [configuration
    template](/docs/templates/configuration-templates.html). There are two
    available variables: `Path`, which is the path to the script to run, and
    `Vars`, which is the list of `environment_vars`, if configured.

-   `elevated_user` and `elevated_password` (string) - If specified, the
    PowerShell script will be run with elevated privileges using the given
    Windows user.

-   `remote_path` (string) - The path where the script will be uploaded to in
    the machine. This defaults to "/tmp/script.sh". This value must be a
    writable location and any parent directories must already exist.

-   `start_retry_timeout` (string) - The amount of time to attempt to *start*
    the remote process. By default this is "5m" or 5 minutes. This setting
    exists in order to deal with times when SSH may restart, such as a
    system reboot. Set this to a higher value if reboots take a longer amount
    of time.

-   `valid_exit_codes` (list of ints) - Valid exit codes for the script. By
    default this is just 0.

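The fragment below is only an illustrative sketch combining several of the
optional settings above; the script path, credentials, and variable values are
hypothetical placeholders, not documented defaults. Exit code 3010, which
Windows commonly returns when a restart is required, is treated as success here.

``` {.javascript}
{
  "type": "powershell",
  "scripts": ["scripts/install-iis.ps1"],
  "environment_vars": ["APP_ENV=staging", "APP_VERSION=1.2.3"],
  "elevated_user": "Administrator",
  "elevated_password": "ExamplePassword123",
  "valid_exit_codes": [0, 3010]
}
```
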
---
description: |
    The masterless Puppet Packer provisioner configures Puppet to run on the
    machines built by Packer, using local modules and manifest files. Modules and
    manifests can be uploaded from your local machine to the remote machine or can
    simply use remote paths (perhaps obtained using something like the shell
    provisioner). Puppet is run in masterless mode, meaning it never communicates
    with a Puppet master.
layout: docs
page_title: 'Puppet (Masterless) Provisioner'
...

# Puppet (Masterless) Provisioner

Type: `puppet-masterless`

The masterless Puppet Packer provisioner configures Puppet to run on the
machines built by Packer, using local modules and manifest files. Modules and
manifests can be uploaded from your local machine to the remote machine or can
simply use remote paths (perhaps obtained using something like the shell
provisioner). Puppet is run in masterless mode, meaning it never communicates
with a Puppet master.

-> **Note:** Puppet will *not* be installed automatically by this
provisioner. This provisioner expects that Puppet is already installed on the
machine. It is common practice to use the [shell
provisioner](/docs/provisioners/shell.html) before the Puppet provisioner to do
this.

## Basic Example

The example below is fully functional and expects the configured manifest file
to exist relative to your working directory:

``` {.javascript}
{
  "type": "puppet-masterless",
  "manifest_file": "site.pp"

...@@ -40,63 +45,65 @@ The reference of available configuration options is listed below.

Required parameters:

-   `manifest_file` (string) - This is either a path to a puppet manifest
    (`.pp` file) *or* a directory containing multiple manifests that puppet will
    apply (the ["main
    manifest"](https://docs.puppetlabs.com/puppet/latest/reference/dirs_manifest.html)).
    These file(s) must exist on your local system and will be uploaded to the
    remote machine.

Optional parameters:

-   `execute_command` (string) - The command used to execute Puppet. This has
    various [configuration template
    variables](/docs/templates/configuration-templates.html) available. See
    below for more information.

-   `facter` (object of key/value strings) - Additional
    [facts](http://puppetlabs.com/puppet/related-projects/facter) to make
    available when Puppet is running.

-   `hiera_config_path` (string) - The path to a local file with hiera
    configuration to be uploaded to the remote machine. Hiera data directories
    must be uploaded using the file provisioner separately.

-   `manifest_dir` (string) - The path to a local directory with manifests to be
    uploaded to the remote machine. This is useful if your main manifest file
    uses imports. This directory doesn't necessarily contain the
    `manifest_file`. It is a separate directory that will be set as the
    "manifestdir" setting on Puppet.

~> `manifest_dir` is passed to `puppet apply` as the `--manifestdir` option.
This option was deprecated in puppet 3.6, and removed in puppet 4.0. If you have
multiple manifests you should use `manifest_file` instead.

-   `module_paths` (array of strings) - This is an array of paths to module
    directories on your local filesystem. These will be uploaded to the
    remote machine. By default, this is empty.

-   `prevent_sudo` (boolean) - By default, the configured commands that are
    executed to run Puppet are executed with `sudo`. If this is true, then the
    sudo will be omitted.

-   `staging_directory` (string) - This is the directory where all the
    configuration of Puppet by Packer will be placed. By default this
    is "/tmp/packer-puppet-masterless". This directory doesn't need to exist but
    must have proper permissions so that the SSH user that Packer uses is able
    to create directories and write into this folder. If the permissions are not
    correct, use a shell provisioner prior to this to configure it properly.

-   `working_directory` (string) - This is the directory from which the puppet
    command will be run. When using hiera with a relative path, this option
    allows you to ensure that the paths are working properly. If not specified,
    this defaults to the value of `staging_directory` (or its default value if
    that is not specified either).

## Execute Command

By default, Packer uses the following command (broken across multiple lines for
readability) to execute Puppet:

``` {.liquid}
cd {{.WorkingDir}} && \
  {{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply \
  --verbose \

...@@ -107,19 +114,19 @@ cd {{.WorkingDir}} && \
  {{.ManifestFile}}
```

This command can be customized using the `execute_command` configuration. As you
can see from the default value above, the value of this configuration can
contain various template variables, defined below:

-   `WorkingDir` - The path from which Puppet will be executed.
-   `FacterVars` - Shell-friendly string of environmental variables used to set
    custom facts configured for this provisioner.
-   `HieraConfigPath` - The path to a hiera configuration file.
-   `ManifestFile` - The path on the remote machine to the manifest file for
    Puppet to use.
-   `ModulePath` - The paths to the module directories.
-   `Sudo` - A boolean of whether to `sudo` the command or not, depending on the
    value of the `prevent_sudo` configuration.

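For example, `execute_command` can be overridden to pass extra flags to
`puppet apply`. The fragment below is only a sketch; it assumes you do not need
the hiera or manifest-dir handling from the default command shown above:

``` {.javascript}
{
  "type": "puppet-masterless",
  "manifest_file": "site.pp",
  "execute_command": "cd {{.WorkingDir}} && {{.FacterVars}}{{if .Sudo}} sudo -E {{end}}puppet apply --verbose --debug --modulepath='{{.ModulePath}}' {{.ManifestFile}}"
}
```
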
## Default Facts

...@@ -127,10 +134,10 @@ In addition to being able to specify custom Facter facts using the `facter`
configuration, the provisioner automatically defines certain commonly useful
facts:

-   `packer_build_name` is set to the name of the build that Packer is running.
    This is most useful when Packer is making multiple builds and you want to
    distinguish them in your Hiera hierarchy.

-   `packer_builder_type` is the type of the builder that was used to create the
    machine that Puppet is running on. This is useful if you want to run only
    certain parts of your Puppet code on systems built with certain builders.

---
description: |
    The `puppet-server` Packer provisioner provisions Packer machines with Puppet
    by connecting to a Puppet master.
layout: docs
page_title: Puppet Server Provisioner
...

# Puppet Server Provisioner

Type: `puppet-server`

The `puppet-server` Packer provisioner provisions Packer machines with Puppet by
connecting to a Puppet master.

-> **Note:** Puppet will *not* be installed automatically by this
provisioner. This provisioner expects that Puppet is already installed on the
machine. It is common practice to use the [shell
provisioner](/docs/provisioners/shell.html) before the Puppet provisioner to do
this.

## Basic Example

The example below is fully functional and expects a Puppet server to be
accessible from your network:

``` {.javascript}
{
  "type": "puppet-server",
  "options": "--test --pluginsync",

...@@ -37,39 +38,39 @@ from your network:

The reference of available configuration options is listed below.

The provisioner takes various options. None are strictly required. They are
listed below:

-   `client_cert_path` (string) - Path to the client certificate for the node on
    your disk. This defaults to nothing, in which case a client cert won't
    be uploaded.

-   `client_private_key_path` (string) - Path to the client private key for the
    node on your disk. This defaults to nothing, in which case a client private
    key won't be uploaded.

-   `facter` (object of key/value strings) - Additional Facter facts to make
    available to the Puppet run.

-   `ignore_exit_codes` (boolean) - If true, Packer will never consider the
    provisioner a failure.

-   `options` (string) - Additional command line options to pass to
    `puppet agent` when Puppet is run.

-   `prevent_sudo` (boolean) - By default, the configured commands that are
    executed to run Puppet are executed with `sudo`. If this is true, then the
    sudo will be omitted.

-   `puppet_node` (string) - The name of the node. If this isn't set, the fully
    qualified domain name will be used.

-   `puppet_server` (string) - Hostname of the Puppet server. By default
    "puppet" will be used.

-   `staging_directory` (string) - This is the directory where all the
    configuration of Puppet by Packer will be placed. By default this
    is "/tmp/packer-puppet-server". This directory doesn't need to exist but
    must have proper permissions so that the SSH user that Packer uses is able
    to create directories and write into this folder. If the permissions are not
    correct, use a shell provisioner prior to this to configure it properly.

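To show how these options fit together, here is an illustrative sketch of a
`puppet-server` block; the hostnames, node name, and certificate paths are
hypothetical:

``` {.javascript}
{
  "type": "puppet-server",
  "puppet_server": "puppet.example.com",
  "puppet_node": "web01.example.com",
  "client_cert_path": "certs/web01.pem",
  "client_private_key_path": "keys/web01.pem",
  "options": "--test --pluginsync"
}
```
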
---
description: |
    The `salt-masterless` Packer provisioner provisions machines built by Packer
    using Salt states, without connecting to a Salt master.
layout: docs
page_title: 'Salt (Masterless) Provisioner'
...

# Salt Masterless Provisioner

Type: `salt-masterless`

The `salt-masterless` Packer provisioner provisions machines built by Packer
using [Salt](http://saltstack.com/) states, without connecting to a Salt master.

## Basic Example

The example below is fully functional.

``` {.javascript}
{
  "type": "salt-masterless",
  "local_state_tree": "/Users/me/salt"

...@@ -25,31 +26,33 @@ The example below is fully functional.

## Configuration Reference

The reference of available configuration options is listed below. The only
required argument is the path to your local salt state tree.

Optional:

-   `bootstrap_args` (string) - Arguments to send to the bootstrap script. Usage
    is somewhat documented on
    [github](https://github.com/saltstack/salt-bootstrap), but the [script
    itself](https://github.com/saltstack/salt-bootstrap/blob/develop/bootstrap-salt.sh)
    has more detailed usage instructions. By default, no arguments are sent to
    the script.

-   `local_pillar_roots` (string) - The path to your local [pillar
    roots](http://docs.saltstack.com/ref/configuration/master.html#pillar-configuration).
    This will be uploaded to `/srv/pillar` on the remote.

-   `local_state_tree` (string) - The path to your local [state
    tree](http://docs.saltstack.com/ref/states/highstate.html#the-salt-state-tree).
    This will be uploaded to `/srv/salt` on the remote.

-   `minion_config` (string) - The path to your local [minion
    config](http://docs.saltstack.com/topics/configuration.html). This will be
    uploaded to `/etc/salt` on the remote.

-   `skip_bootstrap` (boolean) - By default the salt provisioner runs [salt
    bootstrap](https://github.com/saltstack/salt-bootstrap) to install salt. Set
    this to true to skip this step.

-   `temp_config_dir` (string) - Where your local state tree will be copied
    before moving to the `/srv/salt` directory. Default is `/tmp/salt`.

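Tying a few of these together, an illustrative sketch of a `salt-masterless`
block might look like the following; the local paths are hypothetical:

``` {.javascript}
{
  "type": "salt-masterless",
  "local_state_tree": "/Users/me/salt",
  "local_pillar_roots": "/Users/me/pillar",
  "minion_config": "packer/minion",
  "temp_config_dir": "/tmp/salt"
}
```
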
---
description: |
    The shell Packer provisioner provisions machines built by Packer using shell
    scripts. Shell provisioning is the easiest way to get software installed and
    configured on a machine.
layout: docs
page_title: Shell Provisioner
...

# Shell Provisioner

Type: `shell`

The shell Packer provisioner provisions machines built by Packer using shell
scripts. Shell provisioning is the easiest way to get software installed and
configured on a machine.

-> **Building Windows images?** You probably want to use the
[PowerShell](/docs/provisioners/powershell.html) or [Windows
Shell](/docs/provisioners/windows-shell.html) provisioners.

## Basic Example

The example below is fully functional.

``` {.javascript}
{
  "type": "shell",
  "inline": ["echo foo"]

...@@ -33,83 +35,83 @@ The example below is fully functional.

The reference of available configuration options is listed below. The only
required element is either "inline" or "script". Every other option is optional.

Exactly *one* of the following is required:

-   `inline` (array of strings) - This is an array of commands to execute. The
    commands are concatenated by newlines and turned into a single file, so they
    are all executed within the same context. This allows you to change
    directories in one command and use something in the directory in the next
    and so on. Inline scripts are the easiest way to pull off simple tasks
    within the machine.

-   `script` (string) - The path to a script to upload and execute in
    the machine. This path can be absolute or relative. If it is relative, it is
    relative to the working directory when Packer is executed.

-   `scripts` (array of strings) - An array of scripts to execute. The scripts
    will be uploaded and executed in the order specified. Each script is
    executed in isolation, so state such as variables from one script won't
    carry on to the next.

Optional parameters:

-   `binary` (boolean) - If true, specifies that the script(s) are binary files,
    and Packer should therefore not convert Windows line endings to Unix line
    endings (if there are any). By default this is false.

-   `environment_vars` (array of strings) - An array of key/value pairs to
    inject prior to the `execute_command`. The format should be `key=value`.
    Packer injects some environmental variables by default into the environment,
    as well, which are covered in the section below.

-   `execute_command` (string) - The command to use to execute the script. By
    default this is `chmod +x {{ .Path }}; {{ .Vars }} {{ .Path }}`. The value
    of this is treated as a [configuration
    template](/docs/templates/configuration-templates.html). There are two
    available variables: `Path`, which is the path to the script to run, and
    `Vars`, which is the list of `environment_vars`, if configured.

-   `inline_shebang` (string) - The
    [shebang](http://en.wikipedia.org/wiki/Shebang_%28Unix%29) value to use when
    running commands specified by `inline`. By default, this is `/bin/sh -e`. If
    you're not using `inline`, then this configuration has no effect.
    **Important:** If you customize this, be sure to include something like the
    `-e` flag, otherwise individual steps failing won't fail the provisioner.

-   `remote_path` (string) - The path where the script will be uploaded to in
    the machine. This defaults to "/tmp/script.sh". This value must be a
    writable location and any parent directories must already exist.

-   `start_retry_timeout` (string) - The amount of time to attempt to *start*
    the remote process. By default this is "5m" or 5 minutes. This setting
    exists in order to deal with times when SSH may restart, such as a
    system reboot. Set this to a higher value if reboots take a longer amount
    of time.

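The fragment below is an illustrative sketch that exercises a few of the
optional settings above; the commands and variable values are hypothetical:

``` {.javascript}
{
  "type": "shell",
  "inline": [
    "mkdir -p /tmp/app",
    "echo $APP_VERSION > /tmp/app/VERSION"
  ],
  "inline_shebang": "/bin/sh -ex",
  "environment_vars": ["APP_VERSION=1.2.3"]
}
```
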
## Execute Command Example

To many new users, the `execute_command` is puzzling. However, it provides an
important function: customization of how the command is executed. The most
common use case for this is dealing with **sudo password prompts**. You may also
need to customize this if you use a non-POSIX shell, such as `tcsh` on FreeBSD.

### Sudo Example

Some operating systems default to a non-root user. For example if you login as
`ubuntu` and can sudo using the password `packer`, then you'll want to change
`execute_command` to be:

``` {.text}
"echo 'packer' | {{ .Vars }} sudo -E -S sh '{{ .Path }}'"
```

The `-S` flag tells `sudo` to read the password from stdin, which in this case
is being piped in with the value of `packer`. The `-E` flag tells `sudo` to
preserve the environment, allowing our environmental variables to work within
the script.

By setting the `execute_command` to this, your script(s) can run with root
privileges without worrying about password prompts.

### FreeBSD Example

...@@ -123,44 +125,44 @@ Note the addition of `env` before `{{ .Vars }}`.

## Default Environmental Variables

In addition to being able to specify custom environmental variables using the
`environment_vars` configuration, the provisioner automatically defines certain
commonly useful environmental variables:

-   `PACKER_BUILD_NAME` is set to the name of the build that Packer is running.
    This is most useful when Packer is making multiple builds and you want to
    distinguish them slightly from a common provisioning script.

-   `PACKER_BUILDER_TYPE` is the type of the builder that was used to create the
    machine that the script is running on. This is useful if you want to run
    only certain parts of the script on systems built with certain builders.

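For example, an inline script can branch on these variables. The fragment below
is only a sketch; the commands are hypothetical:

``` {.javascript}
{
  "type": "shell",
  "inline": [
    "echo Running build $PACKER_BUILD_NAME on $PACKER_BUILDER_TYPE",
    "if [ \"$PACKER_BUILDER_TYPE\" = \"amazon-ebs\" ]; then echo cloud-specific setup; fi"
  ]
}
```
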
## Handling Reboots

Provisioning sometimes involves restarts, usually when updating the operating
system. Packer is able to tolerate restarts via the shell provisioner.

Packer handles this by retrying to start scripts for a period of time before
failing. This allows time for the machine to start up and be ready to run
scripts. The amount of time the provisioner will wait is configured using
`start_retry_timeout`, which defaults to a few minutes.

Sometimes, when executing a command like `reboot`, the shell script will return
and Packer will start executing the next one before SSH actually quits and the
machine restarts. For this, put a long `sleep` after the reboot so that SSH will
eventually be killed automatically:

``` {.text}
reboot
sleep 60
```

Some OS configurations don't properly kill all network connections on reboot,
causing the provisioner to hang despite a reboot occurring. In this case, make
sure you shut down the network interfaces on reboot or in your shell script. For
example, on Gentoo:

``` {.text}
/etc/init.d/net.eth0 stop
```

...@@ -170,59 +172,53 @@ Some provisioning requires connecting to remote SSH servers from within the
packer instance. The below example is for pulling code from a private git
repository utilizing openssh on the client. Make sure you are running
`ssh-agent` and add your git repo ssh keys into it using `ssh-add /path/to/key`.
When the packer instance needs access to the ssh keys the agent will forward the
request back to your `ssh-agent`.

Note: when provisioning via git you should add the git server keys into the
`~/.ssh/known_hosts` file otherwise the git command could hang awaiting input.
This can be done by copying the file in via the [file
provisioner](/docs/provisioners/file.html) (more secure) or using `ssh-keyscan`
to populate the file (less secure). An example of the latter accessing github
would be:

``` {.javascript}
{
  "type": "shell",
  "inline": [
    "sudo apt-get install -y git",
    "ssh-keyscan github.com >> ~/.ssh/known_hosts",
    "git clone git@github.com:exampleorg/myprivaterepo.git"
  ]
}
```

## Troubleshooting

*My shell script doesn't work correctly on Ubuntu*

-   On Ubuntu, the `/bin/sh` shell is
    [dash](http://en.wikipedia.org/wiki/Debian_Almquist_shell). If your script
    has [bash](http://en.wikipedia.org/wiki/Bash_(Unix_shell))-specific commands
    in it, then put `#!/bin/bash` at the top of your script. Differences between
    dash and bash can be found on the
    [DashAsBinSh](https://wiki.ubuntu.com/DashAsBinSh) Ubuntu wiki page.

*My shell works when I login but fails with the shell provisioner*

-   See the above tip. More than likely, your login shell is using `/bin/bash`
    while the provisioner is using `/bin/sh`.

*My installs hang when using `apt-get` or `yum`*

-   Make sure you add a `-y` to the command to prevent it from requiring user
    input before proceeding.

*How do I tell what my shell script is doing?*

-   Adding a `-x` flag to the shebang at the top of the script (`#!/bin/sh -x`)
    will echo the script statements as it is executing.

*My builds don't always work the same*

-   Some distributions start the SSH daemon before other core services which can
    create race conditions. Your first provisioner can tell the machine to wait
    until it completely boots.

``` {.javascript}
{
  "type": "shell",
  "inline": [ "sleep 10" ]
}
```

---
description: |
    Within the template, the builders section contains an array of all the builders
    that Packer should use to generate machine images for the template.
layout: docs
page_title: 'Templates: Builders'
...

# Templates: Builders

Within the template, the builders section contains an array of all the builders
that Packer should use to generate machine images for the template.

Builders are responsible for creating machines and generating images from them
for various platforms. For example, there are separate builders for EC2, VMware,
VirtualBox, etc. Packer comes with many builders by default, and can also be
extended to add new builders.

This documentation page will cover how to configure a builder in a template. The
specific configuration options available for each builder, however, must be
referenced from the documentation for that specific builder.

Within a template, a section of builder definitions looks like this:

``` {.javascript}
{
  "builders": [
    // ... one or more builder definitions here

...@@ -31,19 +32,19 @@ Within a template, a section of builder definitions looks like this:

## Builder Definition

A single builder definition maps to exactly one
[build](/docs/basics/terminology.html#term-build). A builder definition is a
JSON object that requires at least a `type` key. The `type` is the name of the
builder that will be used to create a machine image for the build.

In addition to the `type`, other keys configure the builder itself. For example,
the AWS builder requires an `access_key`, `secret_key`, and some other settings.
These are placed directly within the builder definition.

An example builder definition is shown below, in this case configuring the AWS
builder:

``` {.javascript}
{
  "type": "amazon-ebs",
  "access_key": "...",

...@@ -53,23 +54,22 @@ the AWS builder:

## Named Builds

Each build in Packer has a name. By default, the name is just the name of the
builder being used. In general, this is good enough. Names only serve as an
indicator in the output of what is happening. If you want, however, you can
specify a custom name using the `name` key within the builder definition.

This is particularly useful if you have multiple builds defined that use the
same underlying builder. In this case, you must specify a name for at least one
of them since the names must be unique.

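For instance, two builds that share the `amazon-ebs` builder could be
distinguished like this; the names are arbitrary and the credential values are
placeholders:

``` {.javascript}
{
  "builders": [
    {
      "name": "web-server",
      "type": "amazon-ebs",
      "access_key": "...",
      "secret_key": "..."
    },
    {
      "name": "worker",
      "type": "amazon-ebs",
      "access_key": "...",
      "secret_key": "..."
    }
  ]
}
```
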
## Communicators

Every build is associated with a single
[communicator](/docs/templates/communicator.html). Communicators are used to
establish a connection for provisioning a remote machine (such as an AWS
instance or local virtual machine).

All the examples for the various builders show some communicator (usually SSH),
but the communicators are highly customizable so we recommend reading the
[communicator documentation](/docs/templates/communicator.html).

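As a rough sketch, the communicator is typically selected and configured with
keys placed inside the builder definition. The exact keys depend on the builder
and communicator in use, so treat the fragment below as an assumption-laden
example rather than a reference:

``` {.javascript}
{
  "type": "amazon-ebs",
  "communicator": "ssh",
  "ssh_username": "ubuntu"
}
```
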
---
description: |
    All strings within templates are processed by a common Packer templating
    engine, where variables and functions can be used to modify the value of a
    configuration parameter at runtime.
layout: docs
page_title: Configuration Templates
...

# Configuration Templates

All strings within templates are processed by a common Packer templating engine,
where variables and functions can be used to modify the value of a configuration
parameter at runtime.

For example, the `{{timestamp}}` function can be used in any string to generate
the current timestamp. This is useful for configurations that require unique
keys, such as AMI names. By setting the AMI name to something like
`My Packer AMI {{timestamp}}`, the AMI name will be unique down to the second.

In addition to globally available functions like timestamp shown before, some
configurations have special local variables that are available only for that
configuration. These are recognizable because they're prefixed by a period, such
as `{{.Name}}`.

The complete syntax is covered in the next section, followed by a reference of
globally available functions.

## Syntax

The syntax of templates is extremely simple. Anything template related happens
within double-braces: `{{ }}`. Variables are prefixed with a period and
capitalized, such as `{{.Variable}}`, and functions are just directly within the
braces, such as `{{timestamp}}`.

Here is an example from the VMware VMX template that shows configuration
templates in action:

``` {.liquid}
.encoding = "UTF-8"
displayName = "{{ .Name }}"
guestOS = "{{ .GuestOS }}"

...@@ -43,7 +45,7 @@ guestOS = "{{ .GuestOS }}"

In this case, the "Name" and "GuestOS" variables will be replaced, potentially
resulting in a VMX that looks like this:

``` {.liquid}
.encoding = "UTF-8"
displayName = "packer"
guestOS = "otherlinux"

...@@ -52,70 +54,132 @@ guestOS = "otherlinux"

## Global Functions

While some configuration settings have local variables specific to only that
configuration, a set of functions are available globally for use in *any string*
in Packer templates. These are listed below for reference.

-   `build_name` - The name of the build being run.
-   `build_type` - The type of the builder being used currently.
-   `isotime [FORMAT]` - UTC time, which can be
    [formatted](http://golang.org/pkg/time/#example_Time_Format). See more
    examples below.
-   `lower` - Lowercases the string.
-   `pwd` - The working directory while executing Packer.
-   `template_dir` - The directory to the template for the build.
-   `timestamp` - The current Unix timestamp in UTC.
-   `uuid` - Returns a random UUID.
-   `upper` - Uppercases the string.

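For example, a builder's AMI name could combine several of these functions. The
fragment below is only illustrative:

``` {.javascript}
{
  "type": "amazon-ebs",
  "ami_name": "{{build_name}}-{{timestamp}}"
}
```
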
### isotime Format

Formatting for the function `isotime` uses the magic reference date **Mon Jan 2
15:04:05 -0700 MST 2006**, which breaks down to the following:

<div class="table-responsive">
<table class="table table-bordered table-condensed">
  <thead>
    <tr>
      <th></th>
      <th align="center">Day of Week</th>
      <th align="center">Month</th>
      <th align="center">Date</th>
      <th align="center">Hour</th>
      <th align="center">Minute</th>
      <th align="center">Second</th>
      <th align="center">Year</th>
      <th align="center">Timezone</th>
    </tr>
  </thead>
  <tr>
    <th>Numeric</th>
    <td align="center">-</td>
    <td align="center">01</td>
    <td align="center">02</td>
    <td align="center">03 (15)</td>
    <td align="center">04</td>
    <td align="center">05</td>
    <td align="center">06</td>
    <td align="center">-0700</td>
  </tr>
  <tr>
    <th>Textual</th>
    <td align="center">Monday (Mon)</td>
    <td align="center">January (Jan)</td>
    <td align="center">-</td>
    <td align="center">-</td>
    <td align="center">-</td>
    <td align="center">-</td>
    <td align="center">-</td>
    <td align="center">MST</td>
  </tr>
</table>
</div>

*The values in parentheses are the abbreviated, or 24-hour clock values*

Here are some example formatted times, using the above format options:

``` {.liquid}
isotime = June 7, 7:22:43pm 2014

{{isotime "2006-01-02"}} = 2014-06-07

...@@ -126,7 +190,7 @@ isotime = June 7, 7:22:43pm 2014

Please note that double quote characters need escaping inside of templates:

``` {.javascript}
{
  "builders": [
    {

...@@ -147,6 +211,6 @@ Please note that double quote characters need escaping inside of templates:

Specific to Amazon builders:

-   `clean_ami_name` - AMI names can only contain certain characters. This
    function will replace illegal characters with a '-' character. Example
    usage, since ":" is not legal in an AMI name, is: `{{isotime | clean_ami_name}}`.

---
description: |
    Templates are JSON files that configure the various components of Packer in
    order to create one or more machine images. Templates are portable, static, and
    readable and writable by both humans and computers. This has the added benefit
    of being able to not only create and modify templates by hand, but also write
    scripts to dynamically create or modify templates.
layout: docs
page_title: Templates
...

# Templates

Templates are JSON files that configure the various components of Packer in
order to create one or more machine images. Templates are portable, static, and
readable and writable by both humans and computers. This has the added benefit
of being able to not only create and modify templates by hand, but also write
scripts to dynamically create or modify templates.

Templates are given to commands such as `packer build`, which will take the
template and actually run the builds within it, producing any resulting machine
images.

## Template Structure ## Template Structure
...@@ -23,64 +27,65 @@ A template is a JSON object that has a set of keys configuring various ...@@ -23,64 +27,65 @@ A template is a JSON object that has a set of keys configuring various
components of Packer. The available keys within a template are listed below. components of Packer. The available keys within a template are listed below.
Along with each key, it is noted whether it is required or not. Along with each key, it is noted whether it is required or not.
* `builders` (_required_) is an array of one or more objects that defines - `builders` (*required*) is an array of one or more objects that defines the
the builders that will be used to create machine images for this template, builders that will be used to create machine images for this template, and
and configures each of those builders. For more information on how to define configures each of those builders. For more information on how to define and
and configure a builder, read the sub-section on configure a builder, read the sub-section on [configuring builders in
[configuring builders in templates](/docs/templates/builders.html). templates](/docs/templates/builders.html).
* `description` (optional) is a string providing a description of what - `description` (optional) is a string providing a description of what the
the template does. This output is used only in the template does. This output is used only in the [inspect
[inspect command](/docs/command-line/inspect.html). command](/docs/command-line/inspect.html).
* `min_packer_version` (optional) is a string that has a minimum Packer - `min_packer_version` (optional) is a string that has a minimum Packer
version that is required to parse the template. This can be used to version that is required to parse the template. This can be used to ensure
ensure that proper versions of Packer are used with the template. A that proper versions of Packer are used with the template. A max version
max version can't be specified because Packer retains backwards can't be specified because Packer retains backwards compatibility with
compatibility with `packer fix`. `packer fix`.
* `post-processors` (optional) is an array of one or more objects that defines the - `post-processors` (optional) is an array of one or more objects that defines
various post-processing steps to take with the built images. If not specified, the various post-processing steps to take with the built images. If not
then no post-processing will be done. For more specified, then no post-processing will be done. For more information on
information on what post-processors do and how they're defined, read the what post-processors do and how they're defined, read the sub-section on
sub-section on [configuring post-processors in templates](/docs/templates/post-processors.html). [configuring post-processors in
templates](/docs/templates/post-processors.html).
* `provisioners` (optional) is an array of one or more objects that defines
the provisioners that will be used to install and configure software for - `provisioners` (optional) is an array of one or more objects that defines
the machines created by each of the builders. If it is not specified, the provisioners that will be used to install and configure software for the
then no provisioners will be run. For more machines created by each of the builders. If it is not specified, then no
information on how to define and configure a provisioner, read the provisioners will be run. For more information on how to define and
sub-section on [configuring provisioners in templates](/docs/templates/provisioners.html). configure a provisioner, read the sub-section on [configuring provisioners
in templates](/docs/templates/provisioners.html).
* `variables` (optional) is an array of one or more key/value strings that defines
user variables contained in the template. - `variables` (optional) is an array of one or more key/value strings that
If it is not specified, then no variables are defined. defines user variables contained in the template. If it is not specified,
For more information on how to define and use user variables, read the then no variables are defined. For more information on how to define and use
sub-section on [user variables in templates](/docs/templates/user-variables.html). user variables, read the sub-section on [user variables in
templates](/docs/templates/user-variables.html).
## Comments ## Comments
JSON doesn't support comments and Packer reports unknown keys as validation JSON doesn't support comments and Packer reports unknown keys as validation
errors. If you'd like to comment your template, you can prefix a _root level_ errors. If you'd like to comment your template, you can prefix a *root level*
key with an underscore. Example: key with an underscore. Example:
```javascript ``` {.javascript}
{ {
"_comment": "This is a comment", "_comment": "This is a comment",
"builders": [{}] "builders": [{}]
} }
``` ```
**Important:** Only _root level_ keys can be underscore prefixed. Keys within **Important:** Only *root level* keys can be underscore prefixed. Keys within
builders, provisioners, etc. will still result in validation errors. builders, provisioners, etc. will still result in validation errors.
## Example Template ## Example Template
Below is an example of a basic template that is nearly fully functional. It is just Below is an example of a basic template that is nearly fully functional. It is
missing valid AWS access keys. Otherwise, it would work properly with just missing valid AWS access keys. Otherwise, it would work properly with
`packer build`. `packer build`.
```javascript ``` {.javascript}
{ {
"builders": [ "builders": [
{ {
......
--- ---
layout: "docs" description: |
page_title: "Templates: Post-Processors" The post-processor section within a template configures any post-processing that
description: |- will be done to images built by the builders. Examples of post-processing would
The post-processor section within a template configures any post-processing that will be done to images built by the builders. Examples of post-processing would be compressing files, uploading artifacts, etc. be compressing files, uploading artifacts, etc.
--- layout: docs
page_title: 'Templates: Post-Processors'
...
# Templates: Post-Processors # Templates: Post-Processors
The post-processor section within a template configures any post-processing The post-processor section within a template configures any post-processing that
that will be done to images built by the builders. Examples of post-processing will be done to images built by the builders. Examples of post-processing would
would be compressing files, uploading artifacts, etc. be compressing files, uploading artifacts, etc.
Post-processors are _optional_. If no post-processors are defined within a template, Post-processors are *optional*. If no post-processors are defined within a
then no post-processing will be done to the image. The resulting artifact of template, then no post-processing will be done to the image. The resulting
a build is just the image outputted by the builder. artifact of a build is just the image outputted by the builder.
This documentation page will cover how to configure a post-processor in a This documentation page will cover how to configure a post-processor in a
template. The specific configuration options available for each post-processor, template. The specific configuration options available for each post-processor,
however, must be referenced from the documentation for that specific post-processor. however, must be referenced from the documentation for that specific
post-processor.
Within a template, a section of post-processor definitions looks like this: Within a template, a section of post-processor definitions looks like this:
```javascript ``` {.javascript}
{ {
"post-processors": [ "post-processors": [
// ... one or more post-processor definitions here // ... one or more post-processor definitions here
...@@ -38,29 +41,29 @@ apply to, if you wish. ...@@ -38,29 +41,29 @@ apply to, if you wish.
## Post-Processor Definition ## Post-Processor Definition
Within the `post-processors` array in a template, there are three ways to Within the `post-processors` array in a template, there are three ways to define
define a post-processor. There are _simple_ definitions, _detailed_ definitions, a post-processor. There are *simple* definitions, *detailed* definitions, and
and _sequence_ definitions. Don't worry, they're all very easy to understand, *sequence* definitions. Don't worry, they're all very easy to understand, and
and the "simple" and "detailed" definitions are simply shortcuts for the the "simple" and "detailed" definitions are simply shortcuts for the "sequence"
"sequence" definition. definition.
A **simple definition** is just a string; the name of the post-processor. An A **simple definition** is just a string; the name of the post-processor. An
example is shown below. Simple definitions are used when no additional configuration example is shown below. Simple definitions are used when no additional
is needed for the post-processor. configuration is needed for the post-processor.
```javascript ``` {.javascript}
{ {
"post-processors": ["compress"] "post-processors": ["compress"]
} }
``` ```
A **detailed definition** is a JSON object. It is very similar to a builder A **detailed definition** is a JSON object. It is very similar to a builder or
or provisioner definition. It contains a `type` field to denote the type of provisioner definition. It contains a `type` field to denote the type of the
the post-processor, but may also contain additional configuration for the post-processor, but may also contain additional configuration for the
post-processor. A detailed definition is used when additional configuration post-processor. A detailed definition is used when additional configuration is
is needed beyond simply the type for the post-processor. An example is shown below. needed beyond simply the type for the post-processor. An example is shown below.
```javascript ``` {.javascript}
{ {
"post-processors": [ "post-processors": [
{ {
...@@ -72,14 +75,14 @@ is needed beyond simply the type for the post-processor. An example is shown bel ...@@ -72,14 +75,14 @@ is needed beyond simply the type for the post-processor. An example is shown bel
``` ```
A **sequence definition** is a JSON array comprised of other **simple** or A **sequence definition** is a JSON array comprised of other **simple** or
**detailed** definitions. The post-processors defined in the array are run **detailed** definitions. The post-processors defined in the array are run in
in order, with the artifact of each feeding into the next, and any intermediary order, with the artifact of each feeding into the next, and any intermediary
artifacts being discarded. A sequence definition may not contain another artifacts being discarded. A sequence definition may not contain another
sequence definition. Sequence definitions are used to chain together multiple sequence definition. Sequence definitions are used to chain together multiple
post-processors. An example is shown below, where the artifact of a build is post-processors. An example is shown below, where the artifact of a build is
compressed then uploaded, but the compressed result is not kept. compressed then uploaded, but the compressed result is not kept.
```javascript ``` {.javascript}
{ {
"post-processors": [ "post-processors": [
[ [
...@@ -90,21 +93,21 @@ compressed then uploaded, but the compressed result is not kept. ...@@ -90,21 +93,21 @@ compressed then uploaded, but the compressed result is not kept.
} }
``` ```
As you may be able to imagine, the **simple** and **detailed** definitions As you may be able to imagine, the **simple** and **detailed** definitions are
are simply shortcuts for a **sequence** definition of only one element. simply shortcuts for a **sequence** definition of only one element.
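To make that equivalence concrete, here is a sketch (not from the original page)
of the earlier `"compress"` simple definition written out as a one-element
sequence containing a detailed definition:

```javascript
{
  "post-processors": [
    [
      {
        "type": "compress"
      }
    ]
  ]
}
```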
## Input Artifacts ## Input Artifacts
When using post-processors, the input artifact (coming from a builder or When using post-processors, the input artifact (coming from a builder or another
another post-processor) is discarded by default after the post-processor runs. post-processor) is discarded by default after the post-processor runs. This is
This is because generally, you don't want the intermediary artifacts on the because generally, you don't want the intermediary artifacts on the way to the
way to the final artifact created. final artifact created.
In some cases, however, you may want to keep the intermediary artifacts. In some cases, however, you may want to keep the intermediary artifacts. You can
You can tell Packer to keep these artifacts by setting the tell Packer to keep these artifacts by setting the `keep_input_artifact`
`keep_input_artifact` configuration to `true`. An example is shown below: configuration to `true`. An example is shown below:
```javascript ``` {.javascript}
{ {
"post-processors": [ "post-processors": [
{ {
...@@ -115,39 +118,37 @@ You can tell Packer to keep these artifacts by setting the ...@@ -115,39 +118,37 @@ You can tell Packer to keep these artifacts by setting the
} }
``` ```
This setting will only keep the input artifact to _that specific_ This setting will only keep the input artifact to *that specific*
post-processor. If you're specifying a sequence of post-processors, then post-processor. If you're specifying a sequence of post-processors, then all
all intermediaries are discarded by default except for the input artifacts intermediaries are discarded by default except for the input artifacts to
to post-processors that explicitly state to keep the input artifact. post-processors that explicitly state to keep the input artifact.
-> **Note:** The intuitive reader may be wondering what happens -&gt; **Note:** The intuitive reader may be wondering what happens if multiple
if multiple post-processors are specified (not in a sequence). Does Packer require the post-processors are specified (not in a sequence). Does Packer require the
configuration to keep the input artifact on all the post-processors? configuration to keep the input artifact on all the post-processors? The answer
The answer is no, of course not. Packer is smart enough to figure out is no, of course not. Packer is smart enough to figure out that at least one
that at least one post-processor requested that the input be kept, so it will keep post-processor requested that the input be kept, so it will keep it around.
it around.
## Run on Specific Builds ## Run on Specific Builds
You can use the `only` or `except` configurations to run a post-processor You can use the `only` or `except` configurations to run a post-processor only
only with specific builds. These two configurations do what you expect: with specific builds. These two configurations do what you expect: `only` will
`only` will only run the post-processor on the specified builds and only run the post-processor on the specified builds and `except` will run the
`except` will run the post-processor on anything other than the specified post-processor on anything other than the specified builds.
builds.
An example of `only` being used is shown below, but the usage of `except` An example of `only` being used is shown below, but the usage of `except` is
is effectively the same. `only` and `except` can only be specified on "detailed" effectively the same. `only` and `except` can only be specified on "detailed"
configurations. If you have a sequence of post-processors to run, `only` configurations. If you have a sequence of post-processors to run, `only` and
and `except` will only affect that single post-processor in the sequence. `except` will only affect that single post-processor in the sequence.
```javascript ``` {.javascript}
{ {
"type": "vagrant", "type": "vagrant",
"only": ["virtualbox-iso"] "only": ["virtualbox-iso"]
} }
``` ```
The values within `only` or `except` are _build names_, not builder The values within `only` or `except` are *build names*, not builder types. If
types. If you recall, build names by default are just their builder type, you recall, build names by default are just their builder type, but if you
but if you specify a custom `name` parameter, then you should use that specify a custom `name` parameter, then you should use that as the value instead
as the value instead of the type. of the type.
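For illustration, a hedged sketch of a template fragment where the build is
given a custom `name` and the post-processor's `only` refers to that name rather
than the builder type (the builder shown is an assumption for the example, and
its other required options are omitted):

```javascript
{
  "builders": [
    {
      "type": "virtualbox-iso",
      "name": "my-base-box"
    }
  ],
  "post-processors": [
    {
      "type": "vagrant",
      "only": ["my-base-box"]
    }
  ]
}
```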
--- ---
layout: "docs" description: |
page_title: "Templates: Provisioners" Within the template, the provisioners section contains an array of all the
description: |- provisioners that Packer should use to install and configure software within
Within the template, the provisioners section contains an array of all the provisioners that Packer should use to install and configure software within running machines prior to turning them into machine images. running machines prior to turning them into machine images.
--- layout: docs
page_title: 'Templates: Provisioners'
...
# Templates: Provisioners # Templates: Provisioners
...@@ -11,19 +13,18 @@ Within the template, the provisioners section contains an array of all the ...@@ -11,19 +13,18 @@ Within the template, the provisioners section contains an array of all the
provisioners that Packer should use to install and configure software within provisioners that Packer should use to install and configure software within
running machines prior to turning them into machine images. running machines prior to turning them into machine images.
Provisioners are _optional_. If no provisioners are defined within a template, Provisioners are *optional*. If no provisioners are defined within a template,
then no software other than the defaults will be installed within the then no software other than the defaults will be installed within the resulting
resulting machine images. This is not typical, however, since much of the machine images. This is not typical, however, since much of the value of Packer
value of Packer is to produce multiple identical images is to produce multiple identical images of pre-configured software.
of pre-configured software.
This documentation page will cover how to configure a provisioner in a template. This documentation page will cover how to configure a provisioner in a template.
The specific configuration options available for each provisioner, however, The specific configuration options available for each provisioner, however, must
must be referenced from the documentation for that specific provisioner. be referenced from the documentation for that specific provisioner.
Within a template, a section of provisioner definitions looks like this: Within a template, a section of provisioner definitions looks like this:
```javascript ``` {.javascript}
{ {
"provisioners": [ "provisioners": [
// ... one or more provisioner definitions here // ... one or more provisioner definitions here
...@@ -31,25 +32,24 @@ Within a template, a section of provisioner definitions looks like this: ...@@ -31,25 +32,24 @@ Within a template, a section of provisioner definitions looks like this:
} }
``` ```
For each of the definitions, Packer will run the provisioner for each For each of the definitions, Packer will run the provisioner for each of the
of the configured builds. The provisioners will be run in the order configured builds. The provisioners will be run in the order they are defined
they are defined within the template. within the template.
## Provisioner Definition ## Provisioner Definition
A provisioner definition is a JSON object that must contain at least A provisioner definition is a JSON object that must contain at least the `type`
the `type` key. This key specifies the name of the provisioner to use. key. This key specifies the name of the provisioner to use. Additional keys
Additional keys within the object are used to configure the provisioner, within the object are used to configure the provisioner, with the exception of a
with the exception of a handful of special keys, covered later. handful of special keys, covered later.
As an example, the "shell" provisioner requires a key such as `script` As an example, the "shell" provisioner requires a key such as `script` which
which specifies a path to a shell script to execute within the machines specifies a path to a shell script to execute within the machines being created.
being created.
An example provisioner definition is shown below, configuring the shell An example provisioner definition is shown below, configuring the shell
provisioner to run a local script within the machines: provisioner to run a local script within the machines:
```javascript ``` {.javascript}
{ {
"type": "shell", "type": "shell",
"script": "script.sh" "script": "script.sh"
...@@ -58,16 +58,15 @@ provisioner to run a local script within the machines: ...@@ -58,16 +58,15 @@ provisioner to run a local script within the machines:
## Run on Specific Builds ## Run on Specific Builds
You can use the `only` or `except` configurations to run a provisioner You can use the `only` or `except` configurations to run a provisioner only with
only with specific builds. These two configurations do what you expect: specific builds. These two configurations do what you expect: `only` will only
`only` will only run the provisioner on the specified builds and run the provisioner on the specified builds and `except` will run the
`except` will run the provisioner on anything other than the specified provisioner on anything other than the specified builds.
builds.
An example of `only` being used is shown below, but the usage of `except` An example of `only` being used is shown below, but the usage of `except` is
is effectively the same: effectively the same:
```javascript ``` {.javascript}
{ {
"type": "shell", "type": "shell",
"script": "script.sh", "script": "script.sh",
...@@ -75,21 +74,21 @@ is effectively the same: ...@@ -75,21 +74,21 @@ is effectively the same:
} }
``` ```
The values within `only` or `except` are _build names_, not builder The values within `only` or `except` are *build names*, not builder types. If
types. If you recall, build names by default are just their builder type, you recall, build names by default are just their builder type, but if you
but if you specify a custom `name` parameter, then you should use that specify a custom `name` parameter, then you should use that as the value instead
as the value instead of the type. of the type.
## Build-Specific Overrides ## Build-Specific Overrides
While the goal of Packer is to produce identical machine images, it While the goal of Packer is to produce identical machine images, it sometimes
sometimes requires periods of time where the machines are different before requires periods of time where the machines are different before they eventually
they eventually converge to be identical. In these cases, different configurations converge to be identical. In these cases, different configurations for
for provisioners may be necessary depending on the build. This can be done provisioners may be necessary depending on the build. This can be done using
using build-specific overrides. build-specific overrides.
An example of where this might be necessary is when building both an EC2 AMI An example of where this might be necessary is when building both an EC2 AMI and
and a VMware machine. The source EC2 AMI may set up a user with administrative a VMware machine. The source EC2 AMI may set up a user with administrative
privileges by default, whereas the VMware machine doesn't have these privileges. privileges by default, whereas the VMware machine doesn't have these privileges.
In this case, the shell script may need to be executed differently. Of course, In this case, the shell script may need to be executed differently. Of course,
the goal is that hopefully the shell script converges these two images to be the goal is that hopefully the shell script converges these two images to be
...@@ -97,7 +96,7 @@ identical. However, they may initially need to be run differently. ...@@ -97,7 +96,7 @@ identical. However, they may initially need to be run differently.
This example is shown below: This example is shown below:
```javascript ``` {.javascript}
{ {
"type": "shell", "type": "shell",
"script": "script.sh", "script": "script.sh",
...@@ -111,24 +110,23 @@ This example is shown below: ...@@ -111,24 +110,23 @@ This example is shown below:
``` ```
As you can see, the `override` key is used. The value of this key is another As you can see, the `override` key is used. The value of this key is another
JSON object where the key is the name of a [builder definition](/docs/templates/builders.html). JSON object where the key is the name of a [builder
The value of this is in turn another JSON object. This JSON object simply definition](/docs/templates/builders.html). The value of this is in turn another
contains the provisioner configuration as normal. This configuration is merged JSON object. This JSON object simply contains the provisioner configuration as
into the default provisioner configuration. normal. This configuration is merged into the default provisioner configuration.
## Pausing Before Running ## Pausing Before Running
With certain provisioners it is sometimes desirable to pause for some period With certain provisioners it is sometimes desirable to pause for some period of
of time before running it. Specifically, in cases where a provisioner reboots time before running it. Specifically, in cases where a provisioner reboots the
the machine, you may want to wait for some period of time before starting machine, you may want to wait for some period of time before starting the next
the next provisioner. provisioner.
Every provisioner definition in a Packer template can take a special Every provisioner definition in a Packer template can take a special
configuration `pause_before` that is the amount of time to pause before configuration `pause_before` that is the amount of time to pause before running
running that provisioner. By default, there is no pause. An example that provisioner. By default, there is no pause. An example is shown below:
is shown below:
```javascript ``` {.javascript}
{ {
"type": "shell", "type": "shell",
"script": "script.sh", "script": "script.sh",
...@@ -136,5 +134,5 @@ is shown below: ...@@ -136,5 +134,5 @@ is shown below:
} }
``` ```
For the above provisioner, Packer will wait 10 seconds before uploading For the above provisioner, Packer will wait 10 seconds before uploading and
and executing the shell script. executing the shell script.
--- ---
layout: "docs" description: |
page_title: "Templates: Push" Within the template, the push section configures how a template can be pushed to
description: |- a remote build service.
Within the template, the push section configures how a template can be layout: docs
pushed to a remote build service. page_title: 'Templates: Push'
--- ...
# Templates: Push # Templates: Push
Within the template, the push section configures how a template can be Within the template, the push section configures how a template can be
[pushed](/docs/command-line/push.html) to a remote build service. [pushed](/docs/command-line/push.html) to a remote build service.
Push configuration is responsible for defining what files are required Push configuration is responsible for defining what files are required to build
to build this template, what the name of the build configuration is in the this template, what the name of the build configuration is in the build service,
build service, etc. etc.
The only build service that Packer can currently push to is The only build service that Packer can currently push to is
[Atlas](https://atlas.hashicorp.com) by HashiCorp. Support for other build [Atlas](https://atlas.hashicorp.com) by HashiCorp. Support for other build
...@@ -21,7 +21,7 @@ services will come in the form of plugins in the future. ...@@ -21,7 +21,7 @@ services will come in the form of plugins in the future.
Within a template, a push configuration section looks like this: Within a template, a push configuration section looks like this:
```javascript ``` {.javascript}
{ {
"push": { "push": {
// ... push configuration here // ... push configuration here
...@@ -37,37 +37,37 @@ each category, the available configuration keys are alphabetized. ...@@ -37,37 +37,37 @@ each category, the available configuration keys are alphabetized.
### Required ### Required
* `name` (string) - Name of the build configuration in the build service. - `name` (string) - Name of the build configuration in the build service. If
If this doesn't exist, it will be created (by default). this doesn't exist, it will be created (by default).
### Optional ### Optional
* `address` (string) - The address of the build service to use. By default - `address` (string) - The address of the build service to use. By default
this is `https://atlas.hashicorp.com`. this is `https://atlas.hashicorp.com`.
* `base_dir` (string) - The base directory of the files to upload. This - `base_dir` (string) - The base directory of the files to upload. This will
will be the current working directory when the build service executes your be the current working directory when the build service executes
template. This path is relative to the template. your template. This path is relative to the template.
* `include` (array of strings) - Glob patterns to include relative to - `include` (array of strings) - Glob patterns to include relative to the
the `base_dir`. If this is specified, only files that match the include `base_dir`. If this is specified, only files that match the include pattern
pattern are included. are included.
* `exclude` (array of strings) - Glob patterns to exclude relative to - `exclude` (array of strings) - Glob patterns to exclude relative to the
the `base_dir`. `base_dir`.
* `token` (string) - An access token to use to authenticate to the build - `token` (string) - An access token to use to authenticate to the
service. build service.
* `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) - `vcs` (boolean) - If true, Packer will detect your VCS (if there is one) and
and only upload the files that are tracked by the VCS. This is useful only upload the files that are tracked by the VCS. This is useful for
for automatically excluding ignored files. This defaults to false. automatically excluding ignored files. This defaults to false.
## Examples ## Examples
A push configuration section with minimal options: A push configuration section with minimal options:
```javascript ``` {.javascript}
{ {
"push": { "push": {
"name": "hashicorp/precise64" "name": "hashicorp/precise64"
...@@ -78,7 +78,7 @@ A push configuration section with minimal options: ...@@ -78,7 +78,7 @@ A push configuration section with minimal options:
A push configuration instructing Packer to inspect the VCS and list individual A push configuration instructing Packer to inspect the VCS and list individual
files to include: files to include:
```javascript ``` {.javascript}
{ {
"push": { "push": {
"name": "hashicorp/precise64", "name": "hashicorp/precise64",
......
--- ---
layout: "docs" description: |
page_title: "User Variables in Templates" User variables allow your templates to be further configured with variables from
description: |- the command-line, environmental variables, or files. This lets you parameterize
User variables allow your templates to be further configured with variables from the command-line, environmental variables, or files. This lets you parameterize your templates so that you can keep secret tokens, environment-specific data, and other types of information out of your templates. This maximizes the portability and shareability of the template. your templates so that you can keep secret tokens, environment-specific data,
--- and other types of information out of your templates. This maximizes the
portability and shareability of the template.
layout: docs
page_title: User Variables in Templates
...
# User Variables # User Variables
User variables allow your templates to be further configured with variables User variables allow your templates to be further configured with variables from
from the command-line, environmental variables, or files. This lets you the command-line, environmental variables, or files. This lets you parameterize
parameterize your templates so that you can keep secret tokens, your templates so that you can keep secret tokens, environment-specific data,
environment-specific data, and other types of information out of your and other types of information out of your templates. This maximizes the
templates. This maximizes the portability and shareability of the template. portability and shareability of the template.
Using user variables assumes you know how Using user variables assumes you know how [configuration
[configuration templates](/docs/templates/configuration-templates.html) work. templates](/docs/templates/configuration-templates.html) work. If you don't know
If you don't know how configuration templates work yet, please read that how configuration templates work yet, please read that page first.
page first.
## Usage ## Usage
User variables must first be defined in a `variables` section within your User variables must first be defined in a `variables` section within your
template. Even if you want a variable to default to an empty string, it template. Even if you want a variable to default to an empty string, it must be
must be defined. This explicitness makes it easy for newcomers to your defined. This explicitness makes it easy for newcomers to your template to
template to understand what can be modified using variables in your template. understand what can be modified using variables in your template.
The `variables` section is a simple key/value mapping of the variable The `variables` section is a simple key/value mapping of the variable name to a
name to a default value. A default value can be the empty string. An default value. A default value can be the empty string. An example is shown
example is shown below: below:
```javascript ``` {.javascript}
{ {
"variables": { "variables": {
"aws_access_key": "", "aws_access_key": "",
...@@ -46,28 +49,27 @@ example is shown below: ...@@ -46,28 +49,27 @@ example is shown below:
``` ```
In the above example, the template defines two variables: `aws_access_key` and In the above example, the template defines two variables: `aws_access_key` and
`aws_secret_key`. They default to empty values. `aws_secret_key`. They default to empty values. Later, the variables are used
Later, the variables are used within the builder we defined in order to within the builder we defined in order to configure the actual keys for the
configure the actual keys for the Amazon builder. Amazon builder.
If the default value is `null`, then the user variable will be _required_. If the default value is `null`, then the user variable will be *required*. This
This means that the user must specify a value for this variable or template means that the user must specify a value for this variable or template
validation will fail. validation will fail.
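For example, a sketch of a `variables` section (illustrative only) in which
`aws_secret_key` has no default and is therefore required:

```javascript
{
  "variables": {
    "aws_access_key": "",
    "aws_secret_key": null
  }
}
```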
Using the variables is extremely easy. Variables are used by calling Using the variables is extremely easy. Variables are used by calling the user
the user function in the form of <code>{{user &#96;variable&#96;}}</code>. function in the form of <code>{{user \`variable\`}}</code>. This function can be
This function can be used in _any value_ within the template, in used in *any value* within the template, in builders, provisioners, *anything*.
builders, provisioners, _anything_. The user variable is available globally The user variable is available globally within the template.
within the template.
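As a brief sketch of that usage (the builder fields shown are assumptions for
the example, and the builder's other required options are omitted):

```javascript
{
  "builders": [
    {
      "type": "amazon-ebs",
      "access_key": "{{user `aws_access_key`}}",
      "secret_key": "{{user `aws_secret_key`}}"
    }
  ]
}
```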
## Environmental Variables ## Environmental Variables
Environmental variables can be used within your template using user Environmental variables can be used within your template using user variables.
variables. The `env` function is available _only_ within the default value The `env` function is available *only* within the default value of a user
of a user variable, allowing you to default a user variable to an variable, allowing you to default a user variable to an environmental variable.
environmental variable. An example is shown below: An example is shown below:
```javascript ``` {.javascript}
{ {
"variables": { "variables": {
"my_secret": "{{env `MY_SECRET`}}", "my_secret": "{{env `MY_SECRET`}}",
...@@ -77,73 +79,69 @@ environmental variable. An example is shown below: ...@@ -77,73 +79,69 @@ environmental variable. An example is shown below:
} }
``` ```
This will default "my\_secret" to be the value of the "MY\_SECRET" This will default "my\_secret" to be the value of the "MY\_SECRET" environmental
environmental variable (or the empty string if it does not exist). variable (or the empty string if it does not exist).
-> **Why can't I use environmental variables elsewhere?** -&gt; **Why can't I use environmental variables elsewhere?** User variables are
User variables are the single source of configurable input to a template. the single source of configurable input to a template. We felt that having
We felt that having environmental variables used _anywhere_ in a environmental variables used *anywhere* in a template would confuse the user
template would confuse the user about the possible inputs to a template. about the possible inputs to a template. By allowing environmental variables
By allowing environmental variables only within default values for user only within default values for user variables, user variables remain as the
variables, user variables remain as the single source of input to a template single source of input to a template that a user can easily discover using
that a user can easily discover using `packer inspect`. `packer inspect`.
## Setting Variables ## Setting Variables
Now that we covered how to define and use variables within a template, Now that we covered how to define and use variables within a template, the next
the next important point is how to actually set these variables. Packer important point is how to actually set these variables. Packer exposes two
exposes two methods for setting variables: from the command line or methods for setting variables: from the command line or from a file.
from a file.
### From the Command Line ### From the Command Line
To set variables from the command line, the `-var` flag is used as To set variables from the command line, the `-var` flag is used as a parameter
a parameter to `packer build` (and some other commands). Continuing our example to `packer build` (and some other commands). Continuing our example above, we
above, we could build our template using the command below. The command could build our template using the command below. The command is split across
is split across multiple lines for readability, but can of course be a single multiple lines for readability, but can of course be a single line.
line.
```text ``` {.text}
$ packer build \ $ packer build \
-var 'aws_access_key=foo' \ -var 'aws_access_key=foo' \
-var 'aws_secret_key=bar' \ -var 'aws_secret_key=bar' \
template.json template.json
``` ```
As you can see, the `-var` flag can be specified multiple times in order As you can see, the `-var` flag can be specified multiple times in order to set
to set multiple variables. Also, variables set later on the command-line multiple variables. Also, variables set later on the command-line override
override earlier set variables if it has already been set. earlier set variables if it has already been set.
Finally, variables set from the command-line override all other methods Finally, variables set from the command-line override all other methods of
of setting variables. So if you specify a variable in a file (the next setting variables. So if you specify a variable in a file (the next method
method shown), you can override it using the command-line. shown), you can override it using the command-line.
### From a File ### From a File
Variables can also be set from an external JSON file. The `-var-file` Variables can also be set from an external JSON file. The `-var-file` flag reads
flag reads a file containing a basic key/value mapping of variables to a file containing a basic key/value mapping of variables to values and sets
values and sets those variables. The JSON file is simple: those variables. The JSON file is simple:
```javascript ``` {.javascript}
{ {
"aws_access_key": "foo", "aws_access_key": "foo",
"aws_secret_key": "bar" "aws_secret_key": "bar"
} }
``` ```
It is a single JSON object where the keys are variables and the values are It is a single JSON object where the keys are variables and the values are the
the variable values. Assuming this file is in `variables.json`, we can variable values. Assuming this file is in `variables.json`, we can build our
build our template using the following command: template using the following command:
```text ``` {.text}
$ packer build -var-file=variables.json template.json $ packer build -var-file=variables.json template.json
``` ```
The `-var-file` flag can be specified multiple times and variables from The `-var-file` flag can be specified multiple times and variables from multiple
multiple files will be read and applied. As you'd expect, variables read files will be read and applied. As you'd expect, variables read from files
from files specified later override a variable set earlier if it has specified later override a variable set earlier if it has already been set.
already been set.
And as mentioned above, no matter where a `-var-file` is specified, a And as mentioned above, no matter where a `-var-file` is specified, a `-var`
`-var` flag on the command line will always override any variables from flag on the command line will always override any variables from a file.
a file.
--- ---
layout: "docs" description: |
page_title: "Convert Veewee Definitions to Packer Templates" If you are or were a user of Veewee, then there is an official tool called
description: |- veewee-to-packer that will convert your Veewee definition into an equivalent
If you are or were a user of Veewee, then there is an official tool called veewee-to-packer that will convert your Veewee definition into an equivalent Packer template. Even if you're not a Veewee user, Veewee has a large library of templates that can be readily used with Packer by simply converting them. Packer template. Even if you're not a Veewee user, Veewee has a large library of
--- templates that can be readily used with Packer by simply converting them.
layout: docs
page_title: Convert Veewee Definitions to Packer Templates
...
# Veewee-to-Packer # Veewee-to-Packer
If you are or were a user of [Veewee](https://github.com/jedi4ever/veewee), If you are or were a user of [Veewee](https://github.com/jedi4ever/veewee), then
then there is an official tool called [veewee-to-packer](https://github.com/mitchellh/veewee-to-packer) there is an official tool called
that will convert your Veewee definition into an equivalent Packer template. [veewee-to-packer](https://github.com/mitchellh/veewee-to-packer) that will
Even if you're not a Veewee user, Veewee has a convert your Veewee definition into an equivalent Packer template. Even if
[large library](https://github.com/jedi4ever/veewee/tree/master/templates) you're not a Veewee user, Veewee has a [large
of templates that can be readily used with Packer by simply converting them. library](https://github.com/jedi4ever/veewee/tree/master/templates) of templates
that can be readily used with Packer by simply converting them.
## Installation and Usage ## Installation and Usage
Since Veewee itself is a Ruby project, veewee-to-packer is also written in Ruby Since Veewee itself is a Ruby project, veewee-to-packer is also written in Ruby
so that it can read the Veewee configurations. Install it using RubyGems: so that it can read the Veewee configurations. Install it using
RubyGems:
```text ``` {.text}
$ gem install veewee-to-packer $ gem install veewee-to-packer
... ...
``` ```
Once installed, usage is easy! Just point `veewee-to-packer` Once installed, usage is easy! Just point `veewee-to-packer` at the
at the `definition.rb` file of any template. The converter will output `definition.rb` file of any template. The converter will output any warnings or
any warnings or messages about the conversion. The example below converts messages about the conversion. The example below converts a CentOS template:
a CentOS template:
```text ``` {.text}
$ veewee-to-packer templates/CentOS-6.4/definition.rb $ veewee-to-packer templates/CentOS-6.4/definition.rb
Success! Your Veewee definition was converted to a Packer Success! Your Veewee definition was converted to a Packer
template! The template can be found in the `template.json` file template! The template can be found in the `template.json` file
...@@ -41,22 +45,21 @@ first, since the template has relative paths that expect you to ...@@ -41,22 +45,21 @@ first, since the template has relative paths that expect you to
use it from the same working directory. use it from the same working directory.
``` ```
***Voila!*** By default, `veewee-to-packer` will output a template that ***Voila!*** By default, `veewee-to-packer` will output a template that contains
contains a builder for both VirtualBox and VMware. You can use the a builder for both VirtualBox and VMware. You can use the `-only` flag on
`-only` flag on `packer build` to only build one of them. Otherwise `packer build` to only build one of them. Otherwise you can use the `--builder`
you can use the `--builder` flag on `veewee-to-packer` to only output flag on `veewee-to-packer` to only output specific builder configurations.
specific builder configurations.
## Limitations ## Limitations
None, really. The tool will tell you if it can't convert a part of a None, really. The tool will tell you if it can't convert a part of a template,
template, and whether that is a critical error or just a warning. and whether that is a critical error or just a warning. Most of Veewee's
Most of Veewee's functions translate perfectly over to Packer. There are functions translate perfectly over to Packer. There are still a couple missing
still a couple missing features in Packer, but they're minimal. features in Packer, but they're minimal.
## Bugs ## Bugs
If you find any bugs, please report them to the If you find any bugs, please report them to the [veewee-to-packer issue
[veewee-to-packer issue tracker](https://github.com/mitchellh/veewee-to-packer). tracker](https://github.com/mitchellh/veewee-to-packer). I haven't been able to
I haven't been able to exhaustively test every Veewee template, so there exhaustively test every Veewee template, so there are certainly some edge cases
are certainly some edge cases out there. out there.
...@@ -3,47 +3,49 @@ page_title: "Downloads" ...@@ -3,47 +3,49 @@ page_title: "Downloads"
--- ---
<header class="dark-background"> <header class="dark-background">
<div class="container header text-center"> <div class="container header text-center">
<h1 class="text-green">Downloads</h1> <h1 class="text-green">Downloads</h1>
<span class="text-green">Latest version: <%= latest_version %></span> <span class="text-green">Latest version: <%= latest_version %></span>
</div> </div>
</header> </header>
<section class="downloads"> <section class="downloads">
<div class="container"> <div class="container">
<div class="description row"> <div class="description row">
<div class="col-md-8 col-md-offset-2"> <div class="col-md-8 col-md-offset-2">
<p> <p>
Below are all available downloads for the latest version of Packer Below are all available downloads for the latest version of Packer (
(<%= latest_version %>). Please download the proper package for your <%= latest_version %>). Please download the proper package for your operating system and architecture. You can find SHA256 checksums for packages <a href="https://dl.bintray.com/mitchellh/packer/packer_<%= latest_version %>_SHA256SUMS?direct">here</a>.
operating system and architecture. You can find SHA256 checksums </p>
for packages <a href="https://dl.bintray.com/mitchellh/packer/packer_<%= latest_version %>_SHA256SUMS?direct">here</a>. </div>
</p> </div>
</div> <% product_versions.each do |os, versions| %>
</div> <div class="row">
<% product_versions.each do |os, versions| %> <div class="col-md-8 col-md-offset-2 download">
<div class="row"> <div class="icon pull-left">
<div class="col-md-8 col-md-offset-2 download"> <%= system_icon(os) %>
<div class="icon pull-left"><%= system_icon(os) %></div> </div>
<div class="details"> <div class="details">
<h2 class="os-name"><%= os %></h2> <h2 class="os-name"><%= os %></h2>
<ul> <ul>
<% versions.each do |url| %> <% versions.each do |url| %>
<li><a href="<%= url %>"><%= arch_for_filename(url) %></a></li> <li>
<% end %> <a href="<%= url %>">
</ul> <%= arch_for_filename(url) %>
<div class="clearfix"></div> </a>
</div> </li>
</div> <% end %>
</div> </ul>
<% end %> <div class="clearfix"></div>
</div>
<div class="row"> </div>
<div class="col-md-8 col-md-offset-2 poweredby"> </div>
<a href='http://www.bintray.com'> <% end %>
<img src='https://www.bintray.com/docs/images/poweredByBintray_ColorTransparent.png'> <div class="row">
</a> <div class="col-md-8 col-md-offset-2 poweredby">
</div> <a href='http://www.bintray.com'>
</div> <img src='https://www.bintray.com/docs/images/poweredByBintray_ColorTransparent.png'>
</div> </a>
</div>
</div>
</div>
</section> </section>
--- ---
description: |- description: Packer is a free and open source tool for creating golden images
Packer is a free and open source tool for creating golden images for multiple platforms from a single source configuration. for multiple platforms from a single source configuration.
--- ---
<div class="home"> <div class="home">
<header class="dark-background">
<header class="dark-background"> <div class="container hero">
<div class="container hero"> <div class="row">
<div class="row"> <div class="col-md-4 col-md-offset-1">
<div class="col-md-4 col-md-offset-1"> <h2>
<h2> <span class="text-green">Packer</span> is a tool for creating machine and container images for multiple platforms from a single source configuration.
<span class="text-green">Packer</span> is a tool for creating machine and container images for multiple platforms from a single source configuration. </h2>
</h2> </div>
</div> </div>
</div> </div>
</div> </header>
</header> <section class="belt download">
<div class="container">
<section class="belt download"> <div class="row download-row">
<div class="container"> <div class="download-container">
<div class="row download-row"> <h2 class="uppercase"><a href="/downloads.html">Download v<%= latest_version %></a></h2>
<div class="download-container"> </div>
<h2 class="uppercase"><a href="/downloads.html">Download v<%= latest_version %></a></h2> </div>
</div> </div>
</div> </section>
</div> <section class="marketting padded-lg">
</div> <div class="container">
</section> <div class="row">
<div class="col-md-6">
<section class="marketting padded-lg"> <%= image_tag 'screenshots/vmware_and_virtualbox.png', class: 'img-responsive' %>
<div class="container"> </div>
<div class="row"> <div class="col-md-6">
<div class="col-md-6"> <h2 class="text-green text-center">Modern, Automated</h2>
<%= image_tag 'screenshots/vmware_and_virtualbox.png', class: 'img-responsive' %> <p>
</div> Packer is easy to use and automates the creation of any type of machine image. It embraces modern configuration management by encouraging you to use automated scripts to install and configure the software within your Packer-made images. Packer brings machine images into the modern age, unlocking untapped potential and opening new opportunities.
</p>
<div class="col-md-6"> </div>
<h2 class="text-green text-center">Modern, Automated</h2> </div>
<p> </div>
Packer is easy to use and automates the creation of any type </section>
of machine image. It embraces modern configuration management by <section class="marketting padded-lg">
encouraging you to use automated scripts to install and <div class="container">
configure the software within your Packer-made images. <div class="row">
<div class="col-md-6">
Packer brings machine images into the modern age, unlocking <h2 class="text-green text-center">Works Great With</h2>
untapped potential and opening new opportunities. <p>
</p> Out of the box Packer comes with support to build images for Amazon EC2, DigitalOcean, Docker, Google Compute Engine, QEMU, VirtualBox, VMware, and more. Support for more platforms is on the way, and anyone can add new platforms via plugins.
</div> </p>
</div> </div>
</div> <div class="col-md-6">
</section> <%= image_tag 'screenshots/works_with.png', class: 'img-responsive' %>
</div>
<section class="marketting padded-lg"> </div>
<div class="container"> </div>
<div class="row"> </section>
<div class="col-md-6"> </div>
<h2 class="text-green text-center">Works Great With</h2> <!-- /.home -->
<p>
Out of the box Packer comes with support to build images for
Amazon EC2, DigitalOcean, Docker, Google Compute Engine, QEMU,
VirtualBox, VMware, and more. Support for
more platforms is on the way, and anyone can add new platforms
via plugins.
</p>
</div>
<div class="col-md-6">
<%= image_tag 'screenshots/works_with.png', class: 'img-responsive' %>
</div>
</div>
</div>
</section>
</div> <!-- /.home -->
--- ---
layout: "intro" description: |
page_title: "Build an Image" With Packer installed, let's just dive right into it and build our first image.
prev_url: "/intro/getting-started/setup.html" Our first image will be an Amazon EC2 AMI with Redis pre-installed. This is just
next_url: "/intro/getting-started/provision.html" an example. Packer can create images for many platforms with anything
next_title: "Provision" pre-installed.
description: |- layout: intro
With Packer installed, let's just dive right into it and build our first image. Our first image will be an Amazon EC2 AMI with Redis pre-installed. This is just an example. Packer can create images for many platforms with anything pre-installed. next_title: Provision
--- next_url: '/intro/getting-started/provision.html'
page_title: Build an Image
prev_url: '/intro/getting-started/setup.html'
...
# Build an Image

With Packer installed, let's just dive right into it and build our first image. Our first image will be an [Amazon EC2 AMI](http://aws.amazon.com/ec2/) with Redis pre-installed. This is just an example. Packer can create images for [many platforms](/intro/platforms.html) with anything pre-installed.

If you don't have an AWS account, [create one now](http://aws.amazon.com/free/). For the example, we'll use a "t2.micro" instance to build our image, which qualifies under the AWS [free-tier](http://aws.amazon.com/free/), meaning it will be free. If you already have an AWS account, you may be charged some amount of money, but it shouldn't be more than a few cents.

-> **Note:** If you're not using an account that qualifies under the AWS free-tier, you may be charged to run these examples. The charge should only be a few cents, but we're not responsible if it ends up being more.

Packer can build images for [many platforms](/intro/platforms.html) other than AWS, but AWS requires no additional software installed on your computer and

...@@ -34,16 +37,16 @@ apply to the other platforms as well.
## The Template

The configuration file used to define what image we want built and how is called a *template* in Packer terminology. The format of a template is simple [JSON](http://www.json.org/). JSON struck the best balance between human-editable and machine-editable, allowing both hand-made and machine-generated templates to be created easily.

We'll start by creating the entire template, then we'll go over each section briefly. Create a file `example.json` and fill it with the following contents:
``` {.javascript}
{
  "variables": {
    "aws_access_key": "",
...@@ -62,55 +65,55 @@
}
```
When building, you'll pass in the `aws_access_key` and `aws_secret_key` as [user variables](/docs/templates/user-variables.html), keeping your secret keys out of the template. You can create security credentials on [this page](https://console.aws.amazon.com/iam/home?#security_credential). An example IAM policy document can be found in the [Amazon EC2 builder docs](/docs/builders/amazon.html).
This is a basic template that is ready-to-go. It should be immediately recognizable as a normal, basic JSON object. Within the object, the `builders` section contains an array of JSON objects configuring a specific *builder*. A builder is a component of Packer that is responsible for creating a machine and turning that machine into an image.

In this case, we're only configuring a single builder of type `amazon-ebs`. This is the Amazon EC2 AMI builder that ships with Packer. This builder builds an EBS-backed AMI by launching a source AMI, provisioning on top of that, and re-packaging it into a new AMI.

The additional keys within the object are configuration for this builder, specifying things such as access keys, the source AMI to build from, and more. The exact set of configuration variables available for a builder is specific to each builder and can be found within the [documentation](/docs).
Before we take this template and build an image from it, let's validate the template by running `packer validate example.json`. This command checks the syntax as well as the configuration values to verify they look valid. The output should look similar to below, because the template should be valid. If there are any errors, this command will tell you.

``` {.text}
$ packer validate example.json
Template validated successfully.
```

Next, let's build the image from this template.

An astute reader may notice that we said earlier we'd be building an image with Redis pre-installed, and yet the template we made doesn't reference Redis anywhere. In fact, this part of the documentation will only cover making a first basic, non-provisioned image. The next section on provisioning will cover installing Redis.
## Your First Image

With a properly validated template, it is time to build your first image. This is done by calling `packer build` with the template file. The output should look similar to below. Note that this process typically takes a few minutes.
``` {.text}
$ packer build \
    -var 'aws_access_key=YOUR ACCESS KEY' \
    -var 'aws_secret_key=YOUR SECRET KEY' \
...@@ -139,38 +142,36 @@
us-east-1: ami-19601070
```
At the end of running `packer build`, Packer outputs the *artifacts* that were created as part of the build. Artifacts are the results of a build, and typically represent an ID (such as in the case of an AMI) or a set of files (such as for a VMware virtual machine). In this example, we only have a single artifact: the AMI in us-east-1 that was created.

This AMI is ready to use. If you want, you can go and launch this AMI right now and it would work great.

-> **Note:** Your AMI ID will surely be different from the one above. If you try to launch the one in the example output above, you will get an error. If you want to try to launch your AMI, get the ID from the Packer output.
## Managing the Image

Packer only builds images. It does not attempt to manage them in any way. After they're built, it is up to you to launch or destroy them as you see fit. If you want to store and namespace images for easy reference, you can use [Atlas by HashiCorp](https://atlas.hashicorp.com). We'll cover remotely building and storing images at the end of this getting started guide.

After running the above example, your AWS account now has an AMI associated with it. AMIs are stored in S3 by Amazon, so unless you want to be charged about $0.01 per month, you'll probably want to remove it. Remove the AMI by first deregistering it on the [AWS AMI management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Images). Next, delete the associated snapshot on the [AWS snapshot management page](https://console.aws.amazon.com/ec2/home?region=us-east-1#s=Snapshots).

Congratulations! You've just built your first image with Packer. Although the image was pretty useless in this case (nothing was changed about it), this page should've given you a general idea of how Packer works, what templates are, and how to validate and build templates into machine images.
---
description: |
    That concludes the getting started guide for Packer. You should now be
    comfortable with basic Packer usage, should understand templates, defining
    builds, provisioners, etc. At this point you're ready to begin playing with and
    using Packer in real scenarios.
layout: intro
page_title: Next Steps
...
# Next Steps

That concludes the getting started guide for Packer. You should now be comfortable with basic Packer usage, should understand templates, defining builds, provisioners, etc. At this point you're ready to begin playing with and using Packer in real scenarios.

From this point forward, the most important reference for you will be the [documentation](/docs). The documentation is less of a guide and more of a reference for all the features and options of Packer.

If you're interested in learning more about how Packer fits into the HashiCorp ecosystem of tools, read our [Atlas getting started overview](https://atlas.hashicorp.com/help/intro/getting-started).

As you use Packer more, please voice your comments and concerns on the [mailing list or IRC](/community). Additionally, Packer is [open source](https://github.com/mitchellh/packer), so please contribute if you'd like to. Contributions are very welcome.
---
description: |
    So far we've shown how Packer can automatically build an image and provision it.
    This on its own is already quite powerful. But Packer can do better than that.
    Packer can create multiple images for multiple platforms in parallel, all
    configured from a single template.
layout: intro
next_title: Vagrant Boxes
next_url: '/intro/getting-started/vagrant.html'
page_title: Parallel Builds
prev_url: '/intro/getting-started/provision.html'
...
# Parallel Builds

So far we've shown how Packer can automatically build an image and provision it. This on its own is already quite powerful. But Packer can do better than that. Packer can create multiple images for multiple platforms *in parallel*, all configured from a single template.

This is a very useful and important feature of Packer. As an example, Packer is able to make an AMI and a VMware virtual machine in parallel, provisioned with the *same scripts*, resulting in near-identical images. The AMI can be used for production, while the VMware machine can be used for development. Or, as another example, if you're using Packer to build [software appliances](http://en.wikipedia.org/wiki/Software_appliance), you can build the appliance for every supported platform all in parallel, all configured from a single template.

Once you start taking advantage of this feature, the possibilities begin to unfold in front of you.

Continuing the example in this getting started guide, we'll build a [DigitalOcean](http://www.digitalocean.com) image as well as an AMI. Both will be near-identical: bare bones Ubuntu OS with Redis pre-installed. However, since we're building for both platforms, you have the option of using the AMI, the DigitalOcean snapshot, or both.
## Setting Up DigitalOcean

[DigitalOcean](https://www.digitalocean.com/) is a relatively new but very popular VPS provider with a quality offering of high-performance, low-cost VPS servers. We'll be building a DigitalOcean snapshot for this example.

In order to do this, you'll need an account with DigitalOcean. [Sign up for an account now](https://www.digitalocean.com/). It is free to sign up. Because the "droplets" (servers) are charged hourly, you *will* be charged $0.01 for every image you create with Packer. If you're not okay with this, just follow along without running the examples.

!> **Warning!** You *will* be charged $0.01 by DigitalOcean per image created with Packer because of the time the "droplet" is running.

Once you sign up for an account, grab your API token from the [DigitalOcean API access page](https://cloud.digitalocean.com/settings/applications). Save this token somewhere; you'll need it in a second.
## Modifying the Template

...@@ -59,20 +61,20 @@

We now have to modify the template to add DigitalOcean to it. Modify the template we've been using and add the following JSON object to the `builders` array.
``` {.javascript}
{
  "type": "digitalocean",
  "api_token": "{{user `do_api_token`}}",
  "image": "ubuntu-14-04-x64",
  "region": "nyc3",
  "size": "512mb"
}
```
You'll also need to modify the `variables` section of the template to include the DigitalOcean API token.
``` {.javascript}
"variables": {
  "do_api_token": "",
  // ...
}
```

...@@ -81,61 +83,61 @@

The entire template should now look like this:
``` {.javascript}
{
  "variables": {
    "aws_access_key": "",
    "aws_secret_key": "",
    "do_api_token": ""
  },

  "builders": [{
    "type": "amazon-ebs",
    "access_key": "{{user `aws_access_key`}}",
    "secret_key": "{{user `aws_secret_key`}}",
    "region": "us-east-1",
    "source_ami": "ami-de0d9eb7",
    "instance_type": "t1.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-example {{timestamp}}"
  },{
    "type": "digitalocean",
    "api_token": "{{user `do_api_token`}}",
    "image": "ubuntu-14-04-x64",
    "region": "nyc3",
    "size": "512mb"
  }],

  "provisioners": [{
    "type": "shell",
    "inline": [
      "sleep 30",
      "sudo apt-get update",
      "sudo apt-get install -y redis-server"
    ]
  }]
}
```
Additional builders are simply added to the `builders` array in the template. This tells Packer to build multiple images. The builder `type` values don't even need to be different! In fact, if you wanted to build multiple AMIs, you can do that as long as you specify a unique `name` for each build.
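For illustration, a rough sketch of two uniquely named builds of the same type is shown below; the `name` values, the second region, and the trimmed-down options are placeholders rather than a complete, tested template:

``` {.javascript}
"builders": [{
  "name": "amazon-east",
  "type": "amazon-ebs",
  "region": "us-east-1",
  "ami_name": "packer-example-east {{timestamp}}",
  // ... the remaining amazon-ebs options from the template above
},{
  "name": "amazon-west",
  "type": "amazon-ebs",
  "region": "us-west-2",
  "ami_name": "packer-example-west {{timestamp}}",
  // ... the remaining amazon-ebs options (the source AMI differs per region)
}]
```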
Validate the template with `packer validate`. This is always a good practice.

-> **Note:** If you're looking for more **DigitalOcean configuration options**, you can find them on the [DigitalOcean Builder page](/docs/builders/digitalocean.html) in the documentation. The documentation is more of a reference manual that contains a listing of all the available configuration options.
## Build

Now run `packer build` with your user variables. The output is too verbose to include all of it, but a portion of it is reproduced below. Note that the ordering and wording of the lines may be slightly different, but the effect is the same.
``` {.text}
$ packer build \
    -var 'aws_access_key=YOUR ACCESS KEY' \
    -var 'aws_secret_key=YOUR SECRET KEY' \
...@@ -162,10 +164,10 @@
us-east-1: ami-376d1d5e
--> digitalocean: A snapshot was created: packer-1371870364
```
As you can see, Packer builds both the Amazon and DigitalOcean images in parallel. It outputs information about each in different colors (although you can't see that in the block above) so that it is easy to identify.

At the end of the build, Packer outputs both of the artifacts created (an AMI and a DigitalOcean snapshot). Both images created are bare bones Ubuntu installations with Redis pre-installed.
---
description: |
    In the previous page of this guide, you created your first image with Packer.
    The image you just built, however, was basically just a repackaging of a
    previously existing base AMI. The real utility of Packer comes from being able
    to install and configure software into the images as well. This stage is also
    known as the *provision* step. Packer fully supports automated provisioning in
    order to install software onto the machines prior to turning them into images.
layout: intro
next_title: Parallel Builds
next_url: '/intro/getting-started/parallel-builds.html'
page_title: Provision
prev_url: '/intro/getting-started/build-image.html'
...
# Provision

In the previous page of this guide, you created your first image with Packer. The image you just built, however, was basically just a repackaging of a previously existing base AMI. The real utility of Packer comes from being able to install and configure software into the images as well. This stage is also known as the *provision* step. Packer fully supports automated provisioning in order to install software onto the machines prior to turning them into images.

In this section, we're going to complete our image by installing Redis on it. This way, the image we end up building actually contains Redis pre-installed. Although Redis is a small, simple example, this should give you an idea of what it may be like to install many more packages into the image.

Historically, pre-baked images have been frowned upon because changing them has been so tedious and slow. Because Packer is completely automated, including provisioning, images can be changed quickly and integrated with modern configuration management tools such as Chef or Puppet.
## Configuring Provisioners

Provisioners are configured as part of the template. We'll use the built-in shell provisioner that comes with Packer to install Redis. Modify the `example.json` template we made previously and add the following. We'll explain the various parts of the new configuration following the code block below.
``` {.javascript}
{
  "variables": ["..."],
  "builders": ["..."],
...@@ -53,51 +55,51 @@
}
```
-> **Note:** The `sleep 30` in the example above is very important. Because Packer is able to detect and SSH into the instance as soon as SSH is available, Ubuntu actually doesn't get proper amounts of time to initialize. The sleep makes sure that the OS properly initializes.

Hopefully it is obvious, but the `builders` section shouldn't actually contain "..."; it should be the contents set up in the previous page of the getting started guide. Also note the comma after the `"builders": [...]` section, which was not present in the previous lesson.
To configure the provisioners, we add a new section `provisioners` to the template, alongside the `builders` configuration. The provisioners section is an array of provisioners to run. If multiple provisioners are specified, they are run in the order given.

By default, each provisioner is run for every builder defined. So if we had two builders defined in our template, such as both Amazon and DigitalOcean, then the shell script would run as part of both builds. There are ways to restrict provisioners to certain builds, but it is outside the scope of this getting started guide. It is covered in more detail in the complete [documentation](/docs).
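As a quick taste of what such a restriction can look like, the sketch below assumes the `only` option described in the provisioner documentation and reuses the default build name `amazon-ebs`; treat it as illustrative rather than something this guide asks you to add:

``` {.javascript}
"provisioners": [{
  "type": "shell",
  "only": ["amazon-ebs"],
  "inline": [
    "sleep 30",
    "sudo apt-get update",
    "sudo apt-get install -y redis-server"
  ]
}]
```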
The one provisioner we defined has a type of `shell`. This provisioner ships with Packer and runs shell scripts on the running machine. In our case, we specify a few inline commands to run in order to install Redis.
## Build

With the provisioner configured, give it a pass once again through `packer validate` to verify everything is okay, then build it using `packer build example.json`. The output should look similar to when you built your first image, except this time there will be a new step where the provisioning is run.

The output from the provisioner is too verbose to include in this guide, since it contains all the output from the shell scripts. But you should see Redis successfully install. After that, Packer once again turns the machine into an AMI.
If you were to launch this AMI, Redis would be pre-installed. Cool!

This is just a basic example. In a real world use case, you may be provisioning an image with the entire stack necessary to run your application. Or maybe just the web stack so that you can have an image for web servers pre-built. This saves tons of time later as you launch these images since everything is pre-installed. Additionally, since everything is pre-installed, you can test the images as they're built and know that when they go into production, they'll be functional.
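To make that concrete, a fuller provisioners section might chain the built-in `file` and `shell` provisioners; the file paths and script names below are invented purely for illustration:

``` {.javascript}
"provisioners": [{
  "type": "file",
  "source": "app/config/redis.conf",
  "destination": "/tmp/redis.conf"
},{
  "type": "shell",
  "scripts": [
    "scripts/install-webserver.sh",
    "scripts/configure-app.sh"
  ]
}]
```

Provisioners run in the order listed, so files uploaded by the `file` provisioner are available to the scripts that follow.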
---
description: |
    Up to this point in the guide, you have been running Packer on your local
    machine to build and provision images on AWS and DigitalOcean. However, you can
    use Atlas by HashiCorp to both run Packer builds remotely and store the output
    of builds.
layout: intro
next_title: Next Steps
next_url: '/intro/getting-started/next.html'
page_title: Remote Builds and Storage
prev_url: '/intro/getting-started/vagrant.html'
...
# Remote Builds and Storage

Up to this point in the guide, you have been running Packer on your local machine to build and provision images on AWS and DigitalOcean. However, you can use [Atlas by HashiCorp](https://atlas.hashicorp.com) to run Packer builds remotely and store the output of builds.

## Why Build Remotely?

By building remotely, you can move access credentials off of developer machines, release local machines from long-running Packer processes, and automatically start Packer builds from trigger sources such as `vagrant push`, a version control system, or CI tool.
## Run Packer Builds Remotely

To run Packer remotely, there are two changes that must be made to the Packer template. The first is the addition of the `push` [configuration](https://www.packer.io/docs/templates/push.html), which sends the Packer template to Atlas so it can run Packer remotely. The second modification is updating the variables section to read variables from the Atlas environment rather than the local environment. Remove the `post-processors` section for now if it is still in your template.
``` {.javascript}
{
  "variables": {
    "aws_access_key": "{{env `aws_access_key`}}",
...@@ -45,31 +63,35 @@
    "name": "ATLAS_USERNAME/packer-tutorial"
  }
}
```
To get an Atlas username, [create an account here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=packer). Replace "ATLAS_USERNAME" with your username, then run `packer push -create example.json` to send the configuration to Atlas, which automatically starts the build.
This build will fail since neither `aws_access_key` nor `aws_secret_key` is set in the Atlas environment. To set environment variables in Atlas, navigate to the [operations tab](https://atlas.hashicorp.com/operations), click the "packer-tutorial" build configuration that was just created, and then click 'variables' in the left navigation. Set `aws_access_key` and `aws_secret_key` with their respective values. Now restart the Packer build by either clicking 'rebuild' in the Atlas UI or by running `packer push example.json` again. Now when you click on the active build, you can view the logs in real-time.
-> **Note:** Whenever a change is made to the Packer template, you must run `packer push` to update the configuration in Atlas.
## Store Packer Outputs

Now we have Atlas building an AMI with Redis pre-configured. This is great, but it's even better to store and version the AMI output so it can be easily deployed by a tool like [Terraform](https://terraform.io). The `atlas` [post-processor](/docs/post-processors/atlas.html) makes this process simple:

``` {.javascript}
{
  "variables": ["..."],
  "builders": ["..."],
  "provisioners": ["..."],
  "push": ["..."],
  "post-processors": [
    {
      "type": "atlas",
      "artifact": "ATLAS_USERNAME/packer-tutorial",
      "artifact_type": "amazon.ami"
    }
  ]
}
```
Update the `post-processors` block with your Atlas username, then run `packer push example.json` and watch the build kick off in Atlas! When the build completes, the resulting artifact will be saved and stored in Atlas.
---
description: |
    Packer must first be installed on the machine you want to run it on. To make
    installation easy, Packer is distributed as a binary package for all supported
    platforms and architectures. This page will not cover how to compile Packer from
    source, as that is covered in the README and is only recommended for advanced
    users.
layout: intro
next_title: Build an Image
next_url: '/intro/getting-started/build-image.html'
page_title: Install Packer
prev_url: '/intro/platforms.html'
...
# Install Packer

Packer must first be installed on the machine you want to run it on. To make installation easy, Packer is distributed as a [binary package](/downloads.html) for all supported platforms and architectures. This page will not cover how to compile Packer from source, as that is covered in the [README](https://github.com/mitchellh/packer/blob/master/README.md) and is only recommended for advanced users.
## Installing Packer

To install Packer, first find the [appropriate package](/downloads.html) for your system and download it. Packer is packaged as a "zip" file.

Next, unzip the downloaded package into a directory where Packer will be installed. On Unix systems, `~/packer` or `/usr/local/packer` is generally good, depending on whether you want to restrict the install to just your user or install it system-wide. On Windows systems, you can put it wherever you'd like.

After unzipping the package, the directory should contain a set of binary programs, such as `packer`, `packer-build-amazon-ebs`, etc. The final step to installation is to make sure the directory you installed Packer to is on the PATH. See [this page](http://stackoverflow.com/questions/14637979/how-to-permanently-set-path-on-linux) for instructions on setting the PATH on Linux and Mac. [This page](http://stackoverflow.com/questions/1618280/where-can-i-set-path-to-make-exe-on-windows) contains instructions for setting the PATH on Windows.
## Verifying the Installation

After installing Packer, verify the installation worked by opening a new command prompt or console, and checking that `packer` is available:

``` {.text}
$ packer
usage: packer [--version] [--help] <command> [<args>]
...@@ -54,16 +58,16 @@ Available commands are:
    version     Prints the Packer version
```
If you get an error that `packer` could not be found, then your PATH environment variable was not set up properly. Please go back and ensure that your PATH variable contains the directory where Packer is installed.

Otherwise, Packer is installed and you're ready to go!
## Alternative Installation Methods

While the binary package is the only official method of installation, there are alternatives available.

### Homebrew

...@@ -73,10 +77,14 @@ If you're using OS X and [Homebrew](http://brew.sh), you can install Packer:
## Troubleshooting

On some RedHat-based Linux distributions there is another tool named `packer` installed by default. You can check for this using `which -a packer`. If you get an error like the following, it indicates a name conflict:

    $ packer
    /usr/share/cracklib/pw_dict.pwd: Permission denied
    /usr/share/cracklib/pw_dict: Permission denied

To fix this, you can create a symlink to packer that uses a different name, like `packer.io`, or invoke the `packer` binary you want using its absolute path, e.g. `/usr/local/packer`.
---
description: |
    Packer also has the ability to take the results of a builder (such as an AMI or
    plain VMware image) and turn it into a Vagrant box.
layout: intro
next_title: Remote Builds and Storage
next_url: '/intro/getting-started/remote-builds.html'
page_title: Vagrant Boxes
prev_url: '/intro/getting-started/parallel-builds.html'
...
# Vagrant Boxes

Packer also has the ability to take the results of a builder (such as an AMI or plain VMware image) and turn it into a [Vagrant](http://www.vagrantup.com) box.

This is done using [post-processors](/docs/templates/post-processors.html). These take an artifact created by a previous builder or post-processor and transform it into a new one. In the case of the Vagrant post-processor, it takes an artifact from a builder and transforms it into a Vagrant box file.

Post-processors are a generally very useful concept. While the example on this getting-started page will be creating Vagrant images, post-processors have many interesting use cases. For example, you can write a post-processor to compress artifacts, upload them, test them, etc.

Let's modify our template to use the Vagrant post-processor to turn our AWS AMI into a Vagrant box usable with the [vagrant-aws plugin](https://github.com/mitchellh/vagrant-aws). If you followed along in the previous page and set up DigitalOcean, note that Packer can't currently make Vagrant boxes for DigitalOcean, but will be able to soon.
## Enabling the Post-Processor

...@@ -35,7 +36,7 @@

Post-processors are added in the `post-processors` section of a template, which we haven't created yet. Modify your `example.json` template and add the section. Your template should look like the following:
``` {.javascript}
{
  "builders": ["..."],
  "provisioners": ["..."],
...@@ -44,8 +45,8 @@
```
In this case, we're enabling a single post-processor named "vagrant". This post-processor is built into Packer and will create Vagrant boxes. You can always create [new post-processors](/docs/extend/post-processor.html), however. The details on configuring post-processors are covered in the [post-processors](/docs/templates/post-processors.html) documentation.
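If you want more control over the result, a post-processor entry can also be written as an object rather than just a name; a minimal sketch using the `output` option (the file name pattern is only an example) might look like this:

``` {.javascript}
"post-processors": [{
  "type": "vagrant",
  "output": "packer_{{.BuildName}}_{{.Provider}}.box"
}]
```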
...@@ -53,27 +54,26 @@ Validate the configuration using `packer validate`.

## Using the Post-Processor
Just run a normal `packer build` and it will now use the post-processor. Since Packer can't currently make a Vagrant box for DigitalOcean anyway, I recommend passing the `-only=amazon-ebs` flag to `packer build` so it only builds the AMI. The command should look like the following:
``` {.text}
$ packer build -only=amazon-ebs example.json
```
As you watch the output, you'll notice at the end in the artifact listing that a Vagrant box was made (by default at `packer_aws.box` in the current directory). Success!
But where did the AMI go? When using post-processors, Packer removes intermediary artifacts since they're usually not wanted. Only the final artifact is preserved. This behavior can be changed, of course. Changing this behavior is covered [in the documentation](/docs/templates/post-processors.html).
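For example, keeping the intermediate artifact around usually comes down to a single option on the post-processor; a minimal sketch (see the post-processors documentation for the exact option names and behavior):

``` {.javascript}
"post-processors": [{
  "type": "vagrant",
  "keep_input_artifact": true
}]
```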
Typically when removing intermediary artifacts, the actual underlying files or resources of the artifact are also removed. For example, when building a VMware image, if you turn it into a Vagrant box, the files of the VMware image will be deleted since they were compressed into the Vagrant box. When creating AWS images, however, the AMI is kept around, since Vagrant needs it to function.
---
description: Learn how Packer fits in with the rest of the HashiCorp ecosystem of tools
layout: intro
next_title: 'Getting Started: Install Packer'
next_url: '/intro/getting-started/setup.html'
page_title: Packer and the HashiCorp Ecosystem
prev_url: '/intro/platforms.html'
...
# Packer and the HashiCorp Ecosystem

HashiCorp is the creator of the open source projects Vagrant, Packer, Terraform, Serf, and Consul, and the commercial product Atlas. Packer is just one piece of the ecosystem HashiCorp has built to make application delivery a versioned, auditable, repeatable, and collaborative process. To learn more about our beliefs on the qualities of the modern datacenter and responsible application delivery, read [The Atlas Mindset: Version Control for Infrastructure](https://hashicorp.com/blog/atlas-mindset.html/?utm_source=packer&utm_campaign=HashicorpEcosystem).

If you are using Packer to build machine images and deployable artifacts, it's likely that you need a solution for deploying those artifacts. Terraform is our tool for creating, combining, and modifying infrastructure.

Below are summaries of HashiCorp's open source projects and a graphic showing how Atlas connects them to create a full application delivery workflow.

# HashiCorp Ecosystem

![Atlas Workflow](docs/atlas-workflow.png)

[Atlas](https://atlas.hashicorp.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is HashiCorp's only commercial product. It unites Packer, Terraform, and Consul to make application delivery a versioned, auditable, repeatable, and collaborative process.

[Packer](https://packer.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating machine images and deployable artifacts such as AMIs, OpenStack images, Docker containers, etc.

[Terraform](https://terraform.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for creating, combining, and modifying infrastructure. In the Atlas workflow Terraform reads from the artifact registry and provisions infrastructure.

[Consul](https://consul.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for service discovery, service registry, and health checks. In the Atlas workflow Consul is configured at the Packer build stage and identifies the service(s) contained in each artifact. Since Consul is configured at the build phase with Packer, when the artifact is deployed with Terraform, it is fully configured with dependencies and service discovery pre-baked. This greatly reduces the risk of an unhealthy node in production due to configuration failure at runtime.

[Serf](https://serfdom.io/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf's gossip protocol as the foundation for service discovery.

[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for managing development environments that mirror production. Vagrant environments reduce the friction of developing a project and reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes can be built in parallel with production artifacts with Packer to maintain parity between development and production.
Serf's gossip protocol as the foundation for service discovery.
[Vagrant](https://www.vagrantup.com/?utm_source=packer&utm_campaign=HashicorpEcosystem)
is a HashiCorp tool for managing development environments that mirror
production. Vagrant environments reduce the friction of developing a project and
reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes
can be built in parallel with production artifacts with Packer to maintain
parity between development and production.
---
description: |
    Welcome to the world of Packer! This introduction guide will show you what
    Packer is, explain why it exists, the benefits it has to offer, and how you can
    get started with it. If you're already familiar with Packer, the documentation
    provides more of a reference for all available features.
layout: intro
next_title: 'Why Use Packer?'
next_url: '/intro/why.html'
page_title: Introduction
prev_url: '# '
...

# Introduction to Packer

Welcome to the world of Packer! This introduction guide will show you what Packer is, explain why it exists, the benefits it has to offer, and how you can get started with it. If you're already familiar with Packer, the [documentation](/docs) provides more of a reference for all available features.

## What is Packer?

Packer is an open source tool for creating identical machine images for multiple platforms from a single source configuration. Packer is lightweight, runs on every major operating system, and is highly performant, creating machine images for multiple platforms in parallel. Packer does not replace configuration management like Chef or Puppet. In fact, when building images, Packer is able to use tools like Chef or Puppet to install software onto the image.

A *machine image* is a single static unit that contains a pre-configured operating system and installed software which is used to quickly create new running machines. Machine image formats change for each platform. Some examples include [AMIs](http://en.wikipedia.org/wiki/Amazon_Machine_Image) for EC2, VMDK/VMX files for VMware, OVF exports for VirtualBox, etc.
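To make "a single source configuration" concrete, here is a minimal sketch of a Packer template, assuming the `docker` builder and the `shell` provisioner; the image name, export path, and command are placeholder choices:

```json
{
  "builders": [
    {
      "type": "docker",
      "image": "ubuntu:14.04",
      "export_path": "packer-example.tar"
    }
  ],
  "provisioners": [
    {
      "type": "shell",
      "inline": ["apt-get update"]
    }
  ]
}
```

Running `packer build` against a template along these lines would produce the machine image (here, a tar export of the container filesystem) with no manual steps.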
---
description: |
    Packer can create machine images for any platform. Packer ships with support for
    a set of platforms, but can be extended through plugins to support any platform.
    This page documents the list of supported image types that Packer supports
    creating.
layout: intro
next_title: 'Packer & the HashiCorp Ecosystem'
next_url: '/intro/hashicorp-ecosystem.html'
page_title: Supported Platforms
prev_url: '/intro/use-cases.html'
...

# Supported Platforms

Packer can create machine images for any platform. Packer ships with support for a set of platforms, but can be [extended through plugins](/docs/extend/builder.html) to support any platform. This page documents the list of supported image types that Packer supports creating.

If you were looking to see what platforms Packer is able to run on, see the page on [installing Packer](/intro/getting-started/setup.html).

-> **Note:** We're always looking to officially support more target platforms. If you're interested in adding support for another platform, please help by opening an issue or pull request within [GitHub](https://github.com/mitchellh/packer) so we can discuss how to make it happen.

Packer supports creating images for the following platforms or targets. The format of the resulting image and any high-level information about the platform is noted. They are listed in alphabetical order. For more detailed information on supported configuration parameters and usage, please see the appropriate [documentation page within the documentation section](/docs).

- ***Amazon EC2 (AMI)***. Both EBS-backed and instance-store AMIs within [EC2](http://aws.amazon.com/ec2/), optionally distributed to multiple regions.
- ***DigitalOcean***. Snapshots for [DigitalOcean](http://www.digitalocean.com/) that can be used to start a pre-configured DigitalOcean instance of any size.
- ***Docker***. Snapshots for [Docker](http://www.docker.io/) that can be used to start a pre-configured Docker instance.
- ***Google Compute Engine***. Snapshots for [Google Compute Engine](https://cloud.google.com/products/compute-engine) that can be used to start a pre-configured Google Compute Engine instance.
- ***OpenStack***. Images for [OpenStack](http://www.openstack.org/) that can be used to start pre-configured OpenStack servers.
- ***Parallels (PVM)***. Exported virtual machines for [Parallels](http://www.parallels.com/downloads/desktop/), including virtual machine metadata such as RAM, CPUs, etc. These virtual machines are portable and can be started on any platform Parallels runs on.
- ***QEMU***. Images for [KVM](http://www.linux-kvm.org/) or [Xen](http://www.xenproject.org/) that can be used to start pre-configured KVM or Xen instances.
- ***VirtualBox (OVF)***. Exported virtual machines for [VirtualBox](https://www.virtualbox.org/), including virtual machine metadata such as RAM, CPUs, etc. These virtual machines are portable and can be started on any platform VirtualBox runs on.
- ***VMware (VMX)***. Exported virtual machines for [VMware](http://www.vmware.com/) that can be run within any desktop products such as Fusion, Player, or Workstation, as well as server products such as vSphere.

As previously mentioned, these are just the target image types that Packer ships with out of the box. You can always [extend Packer through plugins](/docs/extend/builder.html) to support more.
---
description: |
    By now you should know what Packer does and what the benefits of image creation
    are. In this section, we'll enumerate *some* of the use cases for Packer. Note
    that this is not an exhaustive list by any means. There are definitely use cases
    for Packer not listed here. This list is just meant to give you an idea of how
    Packer may improve your processes.
layout: intro
next_title: Supported Platforms
next_url: '/intro/platforms.html'
page_title: Use Cases
prev_url: '/intro/why.html'
...

# Use Cases

By now you should know what Packer does and what the benefits of image creation are. In this section, we'll enumerate *some* of the use cases for Packer. Note that this is not an exhaustive list by any means. There are definitely use cases for Packer not listed here. This list is just meant to give you an idea of how Packer may improve your processes.

### Continuous Delivery

@@ -24,30 +28,31 @@

can be used to generate new machine images for multiple platforms on every change to Chef/Puppet.

As part of this pipeline, the newly created images can then be launched and tested, verifying the infrastructure changes work. If the tests pass, you can be confident that that image will work when deployed. This brings a new level of stability and testability to infrastructure changes.

### Dev/Prod Parity

Packer helps [keep development, staging, and production as similar as possible](http://www.12factor.net/dev-prod-parity). Packer can be used to generate images for multiple platforms at the same time. So if you use AWS for production and VMware (perhaps with [Vagrant](http://www.vagrantup.com)) for development, you can generate both an AMI and a VMware machine using Packer at the same time from the same template.

Mix this in with the continuous delivery use case above, and you have a pretty slick system for consistent work environments from development all the way through to production.
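As an illustrative sketch of the "one template, multiple platforms" idea, a single template can declare both an `amazon-ebs` and a `vmware-iso` builder. The field names below follow those builders' documentation, and every value is a placeholder to be replaced:

```json
{
  "builders": [
    {
      "type": "amazon-ebs",
      "access_key": "REPLACE_ME",
      "secret_key": "REPLACE_ME",
      "region": "us-east-1",
      "source_ami": "REPLACE_ME",
      "instance_type": "t2.micro",
      "ssh_username": "ubuntu",
      "ami_name": "packer-example {{timestamp}}"
    },
    {
      "type": "vmware-iso",
      "iso_url": "REPLACE_ME",
      "iso_checksum": "REPLACE_ME",
      "iso_checksum_type": "sha256",
      "ssh_username": "vagrant",
      "ssh_password": "vagrant",
      "shutdown_command": "sudo shutdown -P now"
    }
  ]
}
```

Building such a template produces an AMI and a VMware machine from the same definition, which is what keeps development and production in parity.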
### Appliance/Demo Creation

Since Packer creates consistent images for multiple platforms in parallel, it is perfect for creating [appliances](http://en.wikipedia.org/wiki/Software_appliance) and disposable product demos. As your software changes, you can automatically create appliances with the software pre-installed. Potential users can then get started with your software by deploying it to the environment of their choice.

Packaging up software with complex requirements has never been so easy. Or enjoyable, if you ask me.
---
description: |
    Pre-baked machine images have a lot of advantages, but most have been unable to
    benefit from them because images have been too tedious to create and manage.
    There were either no existing tools to automate the creation of machine images
    or they had too high of a learning curve. The result is that, prior to Packer,
    creating machine images threatened the agility of operations teams, and
    therefore images weren't used, despite the massive benefits.
layout: intro
next_title: Packer Use Cases
next_url: '/intro/use-cases.html'
page_title: 'Why Use Packer?'
prev_url: '/intro/index.html'
...

# Why Use Packer?

Pre-baked machine images have a lot of advantages, but most have been unable to benefit from them because images have been too tedious to create and manage. There were either no existing tools to automate the creation of machine images or they had too high of a learning curve. The result is that, prior to Packer, creating machine images threatened the agility of operations teams, and therefore images weren't used, despite the massive benefits.

Packer changes all of this. Packer is easy to use and automates the creation of any type of machine image. It embraces modern configuration management by encouraging you to use a framework such as Chef or Puppet to install and configure the software within your Packer-made images.

@@ -28,25 +33,26 @@

untapped potential and opening new opportunities.

## Advantages of Using Packer

***Super fast infrastructure deployment***. Packer images allow you to launch completely provisioned and configured machines in seconds, rather than several minutes or hours. This benefits not only production, but development as well, since development virtual machines can also be launched in seconds, without waiting for a typically much longer provisioning time.

***Multi-provider portability***. Because Packer creates identical images for multiple platforms, you can run production in AWS, staging/QA in a private cloud like OpenStack, and development in desktop virtualization solutions such as VMware or VirtualBox. Each environment is running an identical machine image, giving ultimate portability.

***Improved stability***. Packer installs and configures all the software for a machine at the time the image is built. If there are bugs in these scripts, they'll be caught early, rather than several minutes after a machine is launched.

***Greater testability***. After a machine image is built, that machine image can be quickly launched and smoke tested to verify that things appear to be working. If they are, you can be confident that any other machines launched from that image will function properly.

Packer makes it extremely easy to take advantage of all these benefits.
<% wrap_layout :inner do %>
  <% content_for :sidebar do %>
    <h2></h2>
  <% end %>
  <%= yield %>
<% end %>
<% wrap_layout :inner do %>
  <% content_for :sidebar do %>
    <h2>Docs</h2>
    <ul>
      <li><a href="/docs/installation.html">Installation</a></li>
      <li><a href="/docs/basics/terminology.html">Terminology</a></li>
    </ul>
    <ul>
      <li>
        <h4>Command-Line</h4>
      </li>
      <li><a href="/docs/command-line/introduction.html">Introduction</a></li>
      <li><a href="/docs/command-line/build.html">Build</a></li>
      <li><a href="/docs/command-line/fix.html">Fix</a></li>
      <li><a href="/docs/command-line/inspect.html">Inspect</a></li>
      <li><a href="/docs/command-line/push.html">Push</a></li>
      <li><a href="/docs/command-line/validate.html">Validate</a></li>
      <li><a href="/docs/command-line/machine-readable.html">Machine-Readable Output</a></li>
    </ul>
    <ul>
      <li>
        <h4>Templates</h4>
      </li>
      <li><a href="/docs/templates/introduction.html">Introduction</a></li>
      <li><a href="/docs/templates/builders.html">Builders</a></li>
      <li><a href="/docs/templates/provisioners.html">Provisioners</a></li>
      <li><a href="/docs/templates/post-processors.html">Post-Processors</a></li>
      <li><a href="/docs/templates/push.html">Push</a></li>
      <li><a href="/docs/templates/communicator.html">Communicators</a></li>
      <li><a href="/docs/templates/configuration-templates.html">Configuration Templates</a></li>
      <li><a href="/docs/templates/user-variables.html">User Variables</a></li>
      <li><a href="/docs/templates/veewee-to-packer.html">Veewee-to-Packer</a></li>
    </ul>
    <ul>
      <li>
        <h4>Builders</h4>
      </li>
      <li><a href="/docs/builders/amazon.html">Amazon EC2 (AMI)</a></li>
      <li><a href="/docs/builders/digitalocean.html">DigitalOcean</a></li>
      <li><a href="/docs/builders/docker.html">Docker</a></li>
      <li><a href="/docs/builders/googlecompute.html">Google Compute Engine</a></li>
      <li><a href="/docs/builders/null.html">Null</a></li>
      <li><a href="/docs/builders/openstack.html">OpenStack</a></li>
      <li><a href="/docs/builders/parallels.html">Parallels</a></li>
      <li><a href="/docs/builders/qemu.html">QEMU</a></li>
      <li><a href="/docs/builders/virtualbox.html">VirtualBox</a></li>
      <li><a href="/docs/builders/vmware.html">VMware</a></li>
      <li><a href="/docs/builders/custom.html">Custom</a></li>
    </ul>
    <ul>
      <li>
        <h4>Provisioners</h4>
      </li>
      <li><a href="/docs/provisioners/shell.html">Remote Shell</a></li>
      <li><a href="/docs/provisioners/shell-local.html">Local Shell</a></li>
      <li><a href="/docs/provisioners/file.html">File Uploads</a></li>
      <li><a href="/docs/provisioners/powershell.html">PowerShell</a></li>
      <li><a href="/docs/provisioners/windows-shell.html">Windows Shell</a></li>
      <li><a href="/docs/provisioners/ansible-local.html">Ansible</a></li>
      <li><a href="/docs/provisioners/chef-client.html">Chef Client</a></li>
      <li><a href="/docs/provisioners/chef-solo.html">Chef Solo</a></li>
      <li><a href="/docs/provisioners/puppet-masterless.html">Puppet Masterless</a></li>
      <li><a href="/docs/provisioners/puppet-server.html">Puppet Server</a></li>
      <li><a href="/docs/provisioners/salt-masterless.html">Salt</a></li>
      <li><a href="/docs/provisioners/windows-restart.html">Windows Restart</a></li>
      <li><a href="/docs/provisioners/custom.html">Custom</a></li>
    </ul>
    <ul>
      <li>
        <h4>Post-Processors</h4>
      </li>
      <li><a href="/docs/post-processors/atlas.html">Atlas</a></li>
      <li><a href="/docs/post-processors/compress.html">compress</a></li>
      <li><a href="/docs/post-processors/docker-import.html">docker-import</a></li>
      <li><a href="/docs/post-processors/docker-push.html">docker-push</a></li>
      <li><a href="/docs/post-processors/docker-save.html">docker-save</a></li>
      <li><a href="/docs/post-processors/docker-tag.html">docker-tag</a></li>
      <li><a href="/docs/post-processors/vagrant.html">Vagrant</a></li>
      <li><a href="/docs/post-processors/vagrant-cloud.html">Vagrant Cloud</a></li>
      <li><a href="/docs/post-processors/vsphere.html">vSphere</a></li>
    </ul>
    <ul>
      <li>
        <h4>Other</h4>
      </li>
      <li><a href="/docs/other/core-configuration.html">Core Configuration</a></li>
      <li><a href="/docs/other/debugging.html">Debugging</a></li>
      <li><a href="/docs/other/environmental-variables.html">Environmental Variables</a></li>
    </ul>
    <ul>
      <li>
        <h4>Extend Packer</h4>
      </li>
      <li><a href="/docs/extend/plugins.html">Packer Plugins</a></li>
      <li><a href="/docs/extend/developing-plugins.html">Developing Plugins</a></li>
      <li><a href="/docs/extend/builder.html">Custom Builder</a></li>
      <li><a href="/docs/extend/command.html">Custom Command</a></li>
      <li><a href="/docs/extend/post-processor.html">Custom Post-Processor</a></li>
      <li><a href="/docs/extend/provisioner.html">Custom Provisioner</a></li>
    </ul>
  <% end %>
  <%= yield %>
<% end %>
<% wrap_layout :inner do %>
  <% content_for :sidebar do %>
    <h2>Docs</h2>
    <ul>
      <li>
        <h4>Machine-Readable Reference</h4>
      </li>
      <li><a href="/docs/index.html">&laquo; Back to Docs</a></li>
      <li><a href="/docs/machine-readable/general.html">General Types</a></li>
      <li><a href="/docs/machine-readable/command-build.html">Command: build</a></li>
      <li><a href="/docs/machine-readable/command-inspect.html">Command: inspect</a></li>
      <li><a href="/docs/machine-readable/command-version.html">Command: version</a></li>
    </ul>
  <% end %>
  <%= yield %>
<% end %>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-43075859-1', 'packer.io');
ga('send', 'pageview');
</script>
<% wrap_layout :layout do %>
  <div class="docs-wrapper">
    <div class="row row-lg-height">
      <div class="sidebar col-sm-3 col-md-3 col-lg-height col-md-height col-sm-height">
        <%= yield_content :sidebar %>
      </div>
      <div class="docs-body col-sm-9 col-md-9 col-lg-height col-md-height col-sm-height">
        <div class="docs-content">
          <%= yield %>
        </div>
        <% if current_page.data.next_url %>
          <div class="pagination">
            <a class="previous-section" href="<%= current_page.data.prev_url %>"></a>
            <% if current_page.data.next_url == "" %>
              <h4><span class="text-green uppercase">End</span></h4>
            <% else %>
              <a class="next-section" href="<%= current_page.data.next_url %>">
                <h4>
                  <span class="text-green uppercase">next</span>
                  <%= current_page.data.next_title %>
                </h4>
              </a>
            <% end %>
          </div>
        <% end %>
      </div>
    </div>
  </div>
<% end %>
<% wrap_layout :inner do %>
  <% content_for :sidebar do %>
    <h2>Intro</h2>
    <ul>
      <li>
        <h4>About</h4>
      </li>
      <li><a href="/intro/index.html">What is Packer?</a></li>
      <li><a href="/intro/why.html">Why Use Packer?</a></li>
      <li><a href="/intro/use-cases.html">Use Cases</a></li>
      <li><a href="/intro/platforms.html">Supported Platforms</a></li>
      <li><a href="/intro/hashicorp-ecosystem.html">Packer & the HashiCorp Ecosystem</a></li>
    </ul>
    <ul>
      <li>
        <h4>Getting Started</h4>
      </li>
      <li><a href="/intro/getting-started/setup.html">Install Packer</a></li>
      <li><a href="/intro/getting-started/build-image.html">Build an Image</a></li>
      <li><a href="/intro/getting-started/provision.html">Provision</a></li>
      <li><a href="/intro/getting-started/parallel-builds.html">Parallel Builds</a></li>
      <li><a href="/intro/getting-started/vagrant.html">Vagrant Boxes</a></li>
      <li><a href="/intro/getting-started/remote-builds.html">Remote Builds</a></li>
      <li><a href="/intro/getting-started/next.html">Next Steps</a></li>
    </ul>
  <% end %>
  <%= yield %>
<% end %>
<!DOCTYPE html>
<html>
  <head>
    <title>
      <%= [current_page.data.page_title, "Packer by HashiCorp"].compact.join(" - ") %>
    </title>
    <meta name="description" content="<%= current_page.data.description %>" />
    <%= stylesheet_link_tag "application" %>
    <meta name="HandheldFriendly" content="True" />
    <meta name="MobileOptimized" content="320" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <meta name="apple-mobile-web-app-capable" content="yes">
    <meta name="apple-mobile-web-app-status-bar-style" content="black">
    <link rel="shortcut icon" href="<%= image_path("favicon.ico") %>" type="image/x-icon">
    <link rel="icon" href="<%= image_path("favicon.ico") %>" type="image/x-icon">
    <script type="text/javascript" src="//use.typekit.net/apr3jjs.js"></script>
    <script type="text/javascript">
      try {
        Typekit.load();
      } catch (e) {}
    </script>
  </head>
  <body>
    <nav class="dark-background">
      <div class="container-fluid">
        <div class="row">
          <div class="col-md-12">
            <a href="/" class="packer-logo pull-left">Packer</a>
            <ul>
              <li class="featured">
                <a href="https://github.com/mitchellh/packer" class="primary">GitHub</a>
              </li>
              <li class="featured">
                <a href="/downloads.html" class="primary">Download</a>
              </li>
              <li><a href="/intro">Intro</a></li>
              <li><a href="/docs">Documentation</a></li>
              <li><a href="/community">Community</a></li>
            </ul>
          </div>
        </div>
      </div>
    </nav>
    <%= yield %>
    <div class="clearfix"></div>
    <footer class="dark-background">
      <div class="container-fluid">
        <div class="row">
          <div class="col-md-12">
            <ul>
              <li class="packer"><a href="/">Packer</a></li>
              <li>
                A <a href="http://www.hashicorp.com/">HashiCorp</a> project.
              </li>
              <% # current_page.path does not have an extension, but
                 # current_page.source_file does. Also, we don't want to show
                 # this on the homepage.
                 if current_page.url != "/"
                   current_page_source = current_page.path + \
                     current_page.source_file.split(current_page.path)[1] %>
                <li>
                  <a href="https://github.com/mitchellh/packer/blob/master/website/source/<%= current_page_source %>">Edit this page</a>
                </li>
              <% end %>
              <a href="http://www.hashicorp.com">
                <li class="pull-right hashi-logo">&nbsp;</li>
              </a>
            </ul>
          </div>
        </div>
      </div>
    </footer>
    <%= partial "layouts/google-analytics.html" %>
  </body>
</html>