diff --git a/CHANGELOG.md b/CHANGELOG.md
index b8b2ed1e0c368f6c79aca88b5dd5de5805b83f91..944776ee2cdfa20c1a362c0bedb11fea6fbc2e98 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,7 @@ and this project will adhere to [Semantic Versioning](http://semver.org/spec/v2.
 ### Added
 
 * Support for aliases while asking for facets.
+* Support for general configuration via environment variables and configuration files.
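+  For example, `--memory_mb` for the server can be set as `DGRAPH_SERVER_MEMORY_MB` (illustrative env-variable mapping) or in a file passed with `--config`.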
 
 ### Fixed
 
diff --git a/contrib/tlstest/README.md b/contrib/tlstest/README.md
index 3f78146f048bedcc1aeb23b59239d7f2abd0e75f..d55e4c10a968f907eaed8ecee72459c90eab7bdc 100644
--- a/contrib/tlstest/README.md
+++ b/contrib/tlstest/README.md
@@ -15,6 +15,6 @@ This directory contains several scripts, that helps with testing of tls function
 ## Notes
 Go's x509 package supports only encrypted private keys containing a "DEK-Info" header. By default, openssl doesn't include it in generated keys. Fortunately, if an encryption method is explicitly set on the command line, openssl adds the "DEK-Info" header.
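 
 For example, generating a key with an explicit cipher does include the header (a sketch; the passphrase and file name are illustrative):
 
 ```sh
 openssl genrsa -des3 -passout pass:secret -out client_pass.key 2048
 head -3 client_pass.key  # prints "Proc-Type: 4,ENCRYPTED" and the "DEK-Info:" line
 ```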
 
-`server_pass.sh` should be used with `client_pass.sh`. This enable testing of `tls.server_name` configuration option. Mixing `_pass` and `_nopass` client/server shows that server name is verified by the client.
+`server_pass.sh` should be used with `client_pass.sh`. This enables testing of the `tls_server_name` configuration option. Mixing `_pass` and `_nopass` client/server shows that the server name is verified by the client.
 
 For testing purposes, the DNS names server1.dgraph.io and server2.dgraph.io have to be resolvable. Editing /etc/hosts is the simplest way to achieve this.
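 
 An illustrative way to add those entries:
 
 ```sh
 echo '127.0.0.1 server1.dgraph.io server2.dgraph.io' | sudo tee -a /etc/hosts
 ```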
diff --git a/contrib/tlstest/client_12.sh b/contrib/tlstest/client_12.sh
index 76b02ff33fd48050d636678caa4281b46e64339e..488b3f85f8047b2a037a3bd9a6dd13b2022a3d77 100755
--- a/contrib/tlstest/client_12.sh
+++ b/contrib/tlstest/client_12.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-../../dgraph/dgraph live -d server1.dgraph.io:9080 --tls.on --tls.ca_certs ca.crt --tls.cert client.crt --tls.cert_key client.key --tls.server_name server1.dgraph.io --tls.min_version=TLS12 -r data.rdf.gz -z 127.0.0.1:7081
+../../dgraph/dgraph live -d server1.dgraph.io:9080 --tls_on --tls_ca_certs ca.crt --tls_cert client.crt --tls_cert_key client.key --tls_server_name server1.dgraph.io --tls_min_version=TLS12 -r data.rdf.gz -z 127.0.0.1:7081
diff --git a/contrib/tlstest/client_nocert.sh b/contrib/tlstest/client_nocert.sh
index c16a1834064186aa0a83712680f863987002e89f..89f91fa466800c17ee310ecc9ba4db159c318d23 100755
--- a/contrib/tlstest/client_nocert.sh
+++ b/contrib/tlstest/client_nocert.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-../../dgraph/dgraph live -d server1.dgraph.io:9080 --tls.on --tls.ca_certs ca.crt --tls.server_name server1.dgraph.io -r data.rdf.gz -z 127.0.0.1:7081
+../../dgraph/dgraph live -d server1.dgraph.io:9080 --tls_on --tls_ca_certs ca.crt --tls_server_name server1.dgraph.io -r data.rdf.gz -z 127.0.0.1:7081
diff --git a/contrib/tlstest/client_nopass.sh b/contrib/tlstest/client_nopass.sh
index 7c926ba5f792fee911da2b19b80fd487a422da48..f7faff4c4677a06f95f0729abfa4975ea4f7ca63 100755
--- a/contrib/tlstest/client_nopass.sh
+++ b/contrib/tlstest/client_nopass.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-../../dgraph/dgraph live -d server1.dgraph.io:9080 --tls.on --tls.ca_certs ca.crt --tls.cert client.crt --tls.cert_key client.key --tls.server_name server1.dgraph.io -r data.rdf.gz -z 127.0.0.1:7081
+../../dgraph/dgraph live -d server1.dgraph.io:9080 --tls_on --tls_ca_certs ca.crt --tls_cert client.crt --tls_cert_key client.key --tls_server_name server1.dgraph.io -r data.rdf.gz -z 127.0.0.1:7081
diff --git a/contrib/tlstest/client_pass.sh b/contrib/tlstest/client_pass.sh
index 8cd8fa30680b85d5313426fcc405eba4e26ee702..9c3a7a98f45ddecd2aa38460f8c591549cabf779 100755
--- a/contrib/tlstest/client_pass.sh
+++ b/contrib/tlstest/client_pass.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-../../dgraph/dgraph live -d server2.dgraph.io:9080 --tls.on --tls.ca_certs ca.crt --tls.cert client_pass.crt --tls.cert_key client_pass.key --tls.cert_key_passphrase secret --tls.server_name server2.dgraph.io -r data.rdf.gz -z 127.0.0.1:7081
+../../dgraph/dgraph live -d server2.dgraph.io:9080 --tls_on --tls_ca_certs ca.crt --tls_cert client_pass.crt --tls_cert_key client_pass.key --tls_cert_key_passphrase secret --tls_server_name server2.dgraph.io -r data.rdf.gz -z 127.0.0.1:7081
diff --git a/contrib/tlstest/run.sh b/contrib/tlstest/run.sh
index cada101921860f4553d3ee0bc9c35149d2fb688c..7b6357f1452fe2e4ead2d3c808c31d92fe0c4cfd 100755
--- a/contrib/tlstest/run.sh
+++ b/contrib/tlstest/run.sh
@@ -1,5 +1,6 @@
 #!/bin/bash
 
 pushd $GOPATH/src/github.com/dgraph-io/dgraph/contrib/tlstest
-make test
+set -e
+make test || [[ $TRAVIS_OS_NAME == "osx" ]] # don't fail for mac
 popd
diff --git a/contrib/tlstest/server_11.sh b/contrib/tlstest/server_11.sh
index 1f64080c076b1843bf77f852d36bea725b1537ba..b511303e5b5a4e5b8a657a84f7ac1a5f0da832d9 100755
--- a/contrib/tlstest/server_11.sh
+++ b/contrib/tlstest/server_11.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-../../dgraph/dgraph server --tls.on --tls.ca_certs ca.crt --tls.cert server.crt --tls.cert_key server.key --tls.max_version=TLS11 --memory_mb 2048 --zero 127.0.0.1:8888
+../../dgraph/dgraph server --tls_on --tls_ca_certs ca.crt --tls_cert server.crt --tls_cert_key server.key --tls_max_version=TLS11 --memory_mb 2048 --zero 127.0.0.1:8888
diff --git a/contrib/tlstest/server_nopass.sh b/contrib/tlstest/server_nopass.sh
index eaa80344e33ee31cd1448186f8e2c27f299982a9..111e609747ee93a6760823c3b302f97e33d7b102 100755
--- a/contrib/tlstest/server_nopass.sh
+++ b/contrib/tlstest/server_nopass.sh
@@ -1,4 +1,4 @@
 #!/bin/bash
 
-../../dgraph/dgraph server --tls.on --tls.ca_certs ca.crt --tls.cert server.crt --tls.cert_key server.key \
+../../dgraph/dgraph server --tls_on --tls_ca_certs ca.crt --tls_cert server.crt --tls_cert_key server.key \
 --memory_mb 2048 --zero 127.0.0.1:7081 &> dgraph.log
diff --git a/contrib/tlstest/server_nopass_client_auth.sh b/contrib/tlstest/server_nopass_client_auth.sh
index b7e61dfa6a02dbe85c530fb3636ba3814e5af09c..4f5e5e9304c4e3105c8bbad39ef9ba2b1c051533 100755
--- a/contrib/tlstest/server_nopass_client_auth.sh
+++ b/contrib/tlstest/server_nopass_client_auth.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-../../dgraph/dgraph server --tls.on --tls.ca_certs ca.crt --tls.cert server.crt --tls.cert_key server.key --tls.client_auth REQUIREANDVERIFY --memory_mb 2048 --zero 127.0.0.1:7081
+../../dgraph/dgraph server --tls_on --tls_ca_certs ca.crt --tls_cert server.crt --tls_cert_key server.key --tls_client_auth REQUIREANDVERIFY --memory_mb 2048 --zero 127.0.0.1:7081
diff --git a/contrib/tlstest/server_pass.sh b/contrib/tlstest/server_pass.sh
index 068de85e4eeaee2121aa26b3c9032ae5cfdd7aa6..cf77f4505ed6b2b4b0380130599c60b798402180 100755
--- a/contrib/tlstest/server_pass.sh
+++ b/contrib/tlstest/server_pass.sh
@@ -1,3 +1,3 @@
 #!/bin/bash
 
-../../dgraph/dgraph server --tls.on --tls.ca_certs ca.crt --tls.cert server_pass.crt --tls.cert_key server_pass.key --tls.cert_key_passphrase secret --memory_mb 2048 --zero 127.0.0.1:7081 &> dgraph.log
+../../dgraph/dgraph server --tls_on --tls_ca_certs ca.crt --tls_cert server_pass.crt --tls_cert_key server_pass.key --tls_cert_key_passphrase secret --memory_mb 2048 --zero 127.0.0.1:7081 &> dgraph.log
diff --git a/contrib/tlstest/server_reload.sh b/contrib/tlstest/server_reload.sh
index 240cb61c997f8bdc51d7460dbf1c432a5928429c..a0ab08eacd8a265738c33c22b11c0c1f2dd0feeb 100755
--- a/contrib/tlstest/server_reload.sh
+++ b/contrib/tlstest/server_reload.sh
@@ -1,4 +1,4 @@
 #!/bin/bash
 
-../../cmd/dgraph/dgraph -tls.on -tls.ca_certs ca.crt -tls.cert server_reload.crt -tls.cert_key server_reload.key \
+../../cmd/dgraph/dgraph -tls_on -tls_ca_certs ca.crt -tls_cert server_reload.crt -tls_cert_key server_reload.key \
 	--memory_mb 2048 --zero 127.0.0.1:8888
diff --git a/dgraph/cmd/bulk/run.go b/dgraph/cmd/bulk/run.go
index e4d18ca64735b98c55c15fa057ed366759364a84..6853ccc2af0c393daa479e56e251830045664f04 100644
--- a/dgraph/cmd/bulk/run.go
+++ b/dgraph/cmd/bulk/run.go
@@ -33,63 +33,80 @@ import (
 	"github.com/spf13/cobra"
 )
 
-var opt options
+var Bulk x.SubCommand
 
 func init() {
-	flag := BulkCmd.Flags()
-	flag.StringVarP(&opt.RDFDir, "rdfs", "r", "",
+	Bulk.Cmd = &cobra.Command{
+		Use:   "bulk",
+		Short: "Run Dgraph bulk loader",
+		Run: func(cmd *cobra.Command, args []string) {
+			defer x.StartProfile(Bulk.Conf).Stop()
+			run()
+		},
+	}
+	Bulk.EnvPrefix = "DGRAPH_BULK"
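+	// With viper's AutomaticEnv (bound in dgraph/cmd/root.go), flags can also be
+	// set via environment variables: the prefix plus the upper-cased flag name,
+	// e.g. DGRAPH_BULK_ZERO_ADDR for --zero_addr.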
+
+	flag := Bulk.Cmd.Flags()
+	flag.StringP("rdfs", "r", "",
 		"Directory containing *.rdf or *.rdf.gz files to load.")
-	flag.StringVarP(&opt.SchemaFile, "schema_file", "s", "",
+	flag.StringP("schema_file", "s", "",
 		"Location of schema file to load.")
-	flag.StringVar(&opt.DgraphsDir, "out", "out",
+	flag.String("out", "out",
 		"Location to write the final dgraph data directories.")
-	flag.StringVar(&opt.TmpDir, "tmp", "tmp",
+	flag.String("tmp", "tmp",
 		"Temp directory used to use for on-disk scratch space. Requires free space proportional"+
 			" to the size of the RDF file and the amount of indexing used.")
-	flag.IntVarP(&opt.NumGoroutines, "num_go_routines", "j", runtime.NumCPU(),
+	flag.IntP("num_go_routines", "j", runtime.NumCPU(),
 		"Number of worker threads to use (defaults to the number of logical CPUs)")
-	flag.Int64Var(&opt.MapBufSize, "mapoutput_mb", 64,
+	flag.Int64("mapoutput_mb", 64,
 		"The estimated size of each map file output. Increasing this increases memory usage.")
-	// TODO: Potentially move http server to main.
-	flag.StringVar(&opt.HttpAddr, "http", "localhost:8080",
-		"Address to serve http (pprof).")
-	flag.BoolVar(&opt.SkipMapPhase, "skip_map_phase", false,
+	flag.Bool("expand_edges", true,
+		"Generate edges that allow nodes to be expanded using _predicate_ or expand(...). "+
+			"Disable to increase loading speed.")
+	flag.Bool("skip_map_phase", false,
 		"Skip the map phase (assumes that map output files already exist).")
-	flag.BoolVar(&opt.CleanupTmp, "cleanup_tmp", true,
+	flag.Bool("cleanup_tmp", true,
 		"Clean up the tmp directory after the loader finishes. Setting this to false allows the"+
 			" bulk loader can be re-run while skipping the map phase.")
-	flag.BoolVar(&opt.ExpandEdges, "expand_edges", true,
-		"Generate edges that allow nodes to be expanded using _predicate_ or expand(...). "+
-			"Disable to increase loading speed.")
-	flag.IntVar(&opt.NumShufflers, "shufflers", 1,
+	flag.Int("shufflers", 1,
 		"Number of shufflers to run concurrently. Increasing this can improve performance, and "+
 			"must be less than or equal to the number of reduce shards.")
-	flag.IntVar(&opt.MapShards, "map_shards", 1,
+	flag.Bool("version", false, "Prints the version of dgraph-bulk-loader.")
+	flag.BoolP("store_xids", "x", false, "Generate an xid edge for each node.")
+	flag.StringP("zero_addr", "z", "localhost:8888", "gRPC address for dgraphzero")
+	// TODO: Potentially move http server to main.
+	flag.String("http", "localhost:8080",
+		"Address to serve http (pprof).")
+	flag.Int("map_shards", 1,
 		"Number of map output shards. Must be greater than or equal to the number of reduce "+
 			"shards. Increasing allows more evenly sized reduce shards, at the expense of "+
 			"increased memory usage.")
-	flag.IntVar(&opt.ReduceShards, "reduce_shards", 1,
+	flag.Int("reduce_shards", 1,
 		"Number of reduce shards. This determines the number of dgraph instances in the final "+
 			"cluster. Increasing this potentially decreases the reduce stage runtime by using "+
 			"more parallelism, but increases memory usage.")
-	flag.BoolVar(&opt.Version, "version", false, "Prints the version of dgraph-bulk-loader.")
-	flag.BoolVarP(&opt.StoreXids, "store_xids", "x", false, "Generate an xid edge for each node.")
-	flag.StringVarP(&opt.ZeroAddr, "zero_addr", "z", "localhost:8888", "gRPC address for dgraphzero")
-}
-
-var BulkCmd = &cobra.Command{
-	Use:   "bulk",
-	Short: "Run Dgraph bulk loader",
-	Run: func(cmd *cobra.Command, args []string) {
-		if len(args) > 0 {
-			fmt.Fprintf(os.Stderr, "No free args allowed, but got: %v\n", args)
-			os.Exit(1)
-		}
-		run()
-	},
 }
 
 func run() {
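+	// Read the effective values back through viper so that command-line flags,
+	// environment variables, and any config file are all honored.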
+	opt := options{
+		RDFDir:        Bulk.Conf.GetString("rdfs"),
+		SchemaFile:    Bulk.Conf.GetString("schema_file"),
+		DgraphsDir:    Bulk.Conf.GetString("out"),
+		TmpDir:        Bulk.Conf.GetString("tmp"),
+		NumGoroutines: Bulk.Conf.GetInt("num_go_routines"),
+		MapBufSize:    int64(Bulk.Conf.GetInt("mapoutput_mb")),
+		ExpandEdges:   Bulk.Conf.GetBool("expand_edges"),
+		SkipMapPhase:  Bulk.Conf.GetBool("skip_map_phase"),
+		CleanupTmp:    Bulk.Conf.GetBool("cleanup_tmp"),
+		NumShufflers:  Bulk.Conf.GetInt("shufflers"),
+		Version:       Bulk.Conf.GetBool("version"),
+		StoreXids:     Bulk.Conf.GetBool("store_xids"),
+		ZeroAddr:      Bulk.Conf.GetString("zero_addr"),
+		HttpAddr:      Bulk.Conf.GetString("http"),
+		MapShards:     Bulk.Conf.GetInt("map_shards"),
+		ReduceShards:  Bulk.Conf.GetInt("reduce_shards"),
+	}
+
 	if opt.Version {
 		x.PrintVersionOnly()
 	}
diff --git a/dgraph/cmd/live/run.go b/dgraph/cmd/live/run.go
index fc9f36e9ede8ae3946f20037545012fe916ad9fb..e323e328f6a087b08390eecdba3e28ebbbce2898 100644
--- a/dgraph/cmd/live/run.go
+++ b/dgraph/cmd/live/run.go
@@ -65,25 +65,37 @@ type options struct {
 var opt options
 var tlsConf x.TLSHelperConfig
 
+var Live x.SubCommand
+
 func init() {
-	flag := LiveCmd.Flags()
-	flag.StringVarP(&opt.files, "rdfs", "r", "", "Location of rdf files to load")
-	flag.StringVarP(&opt.schemaFile, "schema", "s", "", "Location of schema file")
-	flag.StringVarP(&opt.dgraph, "dgraph", "d", "127.0.0.1:9080", "Dgraph gRPC server address")
-	flag.StringVarP(&opt.zero, "zero", "z", "127.0.0.1:7080", "Dgraphzero gRPC server address")
-	flag.StringVarP(&opt.clientDir, "xidmap", "x", "x", "Directory to store xid to uid mapping")
-	flag.IntVarP(&opt.concurrent, "conc", "c", 1,
+	Live.Cmd = &cobra.Command{
+		Use:   "live",
+		Short: "Run Dgraph live loader",
+		Run: func(cmd *cobra.Command, args []string) {
+			defer x.StartProfile(Live.Conf).Stop()
+			run()
+		},
+	}
+	Live.EnvPrefix = "DGRAPH_LIVE"
+
+	flag := Live.Cmd.Flags()
+	flag.StringP("rdfs", "r", "", "Location of rdf files to load")
+	flag.StringP("schema", "s", "", "Location of schema file")
+	flag.StringP("dgraph", "d", "127.0.0.1:9080", "Dgraph gRPC server address")
+	flag.StringP("zero", "z", "127.0.0.1:7080", "Dgraphzero gRPC server address")
+	flag.IntP("conc", "c", 1,
 		"Number of concurrent requests to make to Dgraph")
-	flag.IntVarP(&opt.numRdf, "batch", "b", 10000,
+	flag.IntP("batch", "b", 10000,
 		"Number of RDF N-Quads to send as part of a mutation.")
-	flag.BoolVarP(&opt.ignoreIndexConflict, "ignore_index_conflict", "i", true,
+	flag.StringP("xidmap", "x", "x", "Directory to store xid to uid mapping")
+	flag.BoolP("ignore_index_conflict", "i", true,
 		"Ignores conflicts on index keys during transaction")
 
 	// TLS configuration
-	x.SetTLSFlags(&tlsConf, flag)
-	flag.BoolVar(&tlsConf.Insecure, "tls.insecure", false, "Skip certificate validation (insecure)")
-	flag.StringVar(&tlsConf.RootCACerts, "tls.ca_certs", "", "CA Certs file path.")
-	flag.StringVar(&tlsConf.ServerName, "tls.server_name", "", "Server name.")
+	x.RegisterTLSFlags(flag)
+	flag.Bool("tls_insecure", false, "Skip certificate validation (insecure)")
+	flag.String("tls_ca_certs", "", "CA Certs file path.")
+	flag.String("tls_server_name", "", "Server name.")
 }
 
 // Reads a single line from a buffered reader. The line is read into the
@@ -303,15 +315,22 @@ func setup(opts batchMutationOptions, dc *client.Dgraph) *loader {
 	return l
 }
 
-var LiveCmd = &cobra.Command{
-	Use:   "live",
-	Short: "Run Dgraph live loader",
-	Run: func(cmd *cobra.Command, args []string) {
-		run()
-	},
-}
-
 func run() {
+	opt = options{
+		files:               Live.Conf.GetString("rdfs"),
+		schemaFile:          Live.Conf.GetString("schema"),
+		dgraph:              Live.Conf.GetString("dgraph"),
+		zero:                Live.Conf.GetString("zero"),
+		concurrent:          Live.Conf.GetInt("conc"),
+		numRdf:              Live.Conf.GetInt("batch"),
+		clientDir:           Live.Conf.GetString("xidmap"),
+		ignoreIndexConflict: Live.Conf.GetBool("ignore_index_conflict"),
+	}
+	x.LoadTLSConfig(&tlsConf, Live.Conf)
+	tlsConf.Insecure = Live.Conf.GetBool("tls_insecure")
+	tlsConf.RootCACerts = Live.Conf.GetString("tls_ca_certs")
+	tlsConf.ServerName = Live.Conf.GetString("tls_server_name")
+
 	go http.ListenAndServe("localhost:6060", nil)
 	ctx := context.Background()
 	bmOpts := batchMutationOptions{
diff --git a/dgraph/cmd/root.go b/dgraph/cmd/root.go
index ffce971a3bfb26525f894ca73f47c8eb9f2cfc03..7063574c57d061c69e0d2630955ab3f83cb48e0e 100644
--- a/dgraph/cmd/root.go
+++ b/dgraph/cmd/root.go
@@ -20,21 +20,16 @@ package cmd
 import (
 	"fmt"
 	"os"
-	"runtime"
 
 	"github.com/dgraph-io/dgraph/dgraph/cmd/bulk"
 	"github.com/dgraph-io/dgraph/dgraph/cmd/live"
 	"github.com/dgraph-io/dgraph/dgraph/cmd/server"
 	"github.com/dgraph-io/dgraph/dgraph/cmd/zero"
 	"github.com/dgraph-io/dgraph/x"
-	"github.com/pkg/profile"
 	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
 )
 
-var profileMode string
-var blockRate int
-var version bool
-
 // RootCmd represents the base command when called without any subcommands
 var RootCmd = &cobra.Command{
 	Use:   "dgraph",
@@ -47,43 +42,49 @@ graph database, it tightly controls how the data is arranged on disk to optimize
 for query performance and throughput, reducing disk seeks and network calls in a
 cluster.
 ` + x.BuildDetails(),
+	PersistentPreRunE: cobra.NoArgs,
 }
 
 // Execute adds all child commands to the root command and sets flags appropriately.
 // This is called by main.main(). It only needs to happen once to the rootCmd.
 func Execute() {
-	// TODO: Fix me, Flags are parsed in RootCmd.Execute
-	runtime.SetBlockProfileRate(blockRate)
-	switch profileMode {
-	case "cpu":
-		defer profile.Start(profile.CPUProfile).Stop()
-	case "mem":
-		defer profile.Start(profile.MemProfile).Stop()
-	case "mutex":
-		defer profile.Start(profile.MutexProfile).Stop()
-	case "block":
-		defer profile.Start(profile.BlockProfile).Stop()
-	case "":
-		// do nothing
-	default:
-		fmt.Printf("Invalid profile mode: %q\n", profileMode)
-		os.Exit(1)
-	}
 	if err := RootCmd.Execute(); err != nil {
 		fmt.Println(err)
 		os.Exit(1)
 	}
 }
 
-func init() {
-	cobra.OnInitialize()
-	RootCmd.AddCommand(bulk.BulkCmd)
-	RootCmd.AddCommand(live.LiveCmd)
-	RootCmd.AddCommand(server.ServerCmd)
-	RootCmd.AddCommand(zero.ZeroCmd)
+var rootConf = viper.New()
 
-	RootCmd.PersistentFlags().StringVar(&profileMode, "profile.mode", "",
+func init() {
+	RootCmd.PersistentFlags().String("profile_mode", "",
 		"Enable profiling mode, one of [cpu, mem, mutex, block]")
-	RootCmd.PersistentFlags().IntVar(&blockRate, "block.rate", 0,
-		"Block profiling rate. Must be used along with block profile.mode")
+	RootCmd.PersistentFlags().Int("block_rate", 0,
+		"Block profiling rate. Must be used along with block profile_mode")
+	RootCmd.PersistentFlags().String("config", "",
+		"Configuration file. Takes precedence over default values, but is "+
+			"overridden to values set with environment variables and flags.")
+	rootConf.BindPFlags(RootCmd.PersistentFlags())
+
+	var subcommands = []*x.SubCommand{
+		&bulk.Bulk, &live.Live, &server.Server, &zero.Zero,
+	}
+	for _, sc := range subcommands {
+		RootCmd.AddCommand(sc.Cmd)
+		sc.Conf = viper.New()
+		sc.Conf.BindPFlags(sc.Cmd.Flags())
+		sc.Conf.BindPFlags(RootCmd.PersistentFlags())
+		sc.Conf.AutomaticEnv()
+		sc.Conf.SetEnvPrefix(sc.EnvPrefix)
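+		// Resulting precedence: explicit flags override environment variables,
+		// which override the config file, which overrides flag defaults.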
+	}
+	cobra.OnInitialize(func() {
+		cfg := rootConf.GetString("config")
+		if cfg == "" {
+			return
+		}
+		for _, sc := range subcommands {
+			sc.Conf.SetConfigFile(cfg)
+			x.Check(x.Wrapf(sc.Conf.ReadInConfig(), "reading config"))
+		}
+	})
 }
diff --git a/dgraph/cmd/server/run.go b/dgraph/cmd/server/run.go
index f105734893eca3c27baae4b835248469f07b3297..ddb9547a7b63f496da9016cfdd318b66417ec402 100644
--- a/dgraph/cmd/server/run.go
+++ b/dgraph/cmd/server/run.go
@@ -47,91 +47,90 @@ import (
 )
 
 var (
-	bindall          bool
-	exposeTrace      bool
-	customTokenizers string
-	config           edgraph.Options
-	tlsConf          x.TLSHelperConfig
-	uiDir            string
+	bindall bool
+	config  edgraph.Options
+	tlsConf x.TLSHelperConfig
+	uiDir   string
 )
 
+var Server x.SubCommand
+
 func init() {
+	Server.Cmd = &cobra.Command{
+		Use:   "server",
+		Short: "Run Dgraph data server",
+		Long:  "Run Dgraph data server",
+		Run: func(cmd *cobra.Command, args []string) {
+			defer x.StartProfile(Server.Conf).Stop()
+			run()
+		},
+	}
+	Server.EnvPrefix = "DGRAPH_SERVER"
+
 	defaults := edgraph.DefaultConfig
-	flag := ServerCmd.Flags()
-	flag.StringVarP(&config.PostingDir, "postings", "p", defaults.PostingDir,
+	flag := Server.Cmd.Flags()
+	flag.StringP("postings", "p", defaults.PostingDir,
 		"Directory to store posting lists.")
-	flag.StringVar(&config.PostingTables, "posting_tables", defaults.PostingTables,
+	flag.String("posting_tables", defaults.PostingTables,
 		"Specifies how Badger LSM tree is stored. Options are loadtoram, memorymap and "+
 			"fileio; which consume most to least RAM while providing best to worst "+
 			"performance respectively.")
-	flag.StringVarP(&config.WALDir, "wal", "w", defaults.WALDir,
+	flag.StringP("wal", "w", defaults.WALDir,
 		"Directory to store raft write-ahead logs.")
-	flag.BoolVar(&config.Nomutations, "nomutations", defaults.Nomutations,
+	flag.Bool("nomutations", defaults.Nomutations,
 		"Don't allow mutations on this server.")
 
-	flag.StringVar(&config.ExportPath, "export", defaults.ExportPath,
+	flag.String("export", defaults.ExportPath,
 		"Folder in which to store exports.")
-	flag.IntVar(&config.NumPendingProposals, "pending_proposals", defaults.NumPendingProposals,
+	flag.Int("pending_proposals", defaults.NumPendingProposals,
 		"Number of pending mutation proposals. Useful for rate limiting.")
-	flag.Float64Var(&config.Tracing, "trace", defaults.Tracing,
+	flag.Float64("trace", defaults.Tracing,
 		"The ratio of queries to trace.")
-	flag.StringVar(&config.MyAddr, "my", defaults.MyAddr,
+	flag.String("my", defaults.MyAddr,
 		"IP_ADDRESS:PORT of this server, so other Dgraph servers can talk to this.")
-	flag.StringVar(&config.ZeroAddr, "zero", defaults.ZeroAddr,
+	flag.String("zero", defaults.ZeroAddr,
 		"IP_ADDRESS:PORT of Dgraph zero.")
-	flag.Uint64Var(&config.RaftId, "idx", 0,
+	flag.Uint64("idx", 0,
 		"Optional Raft ID that this server will use to join RAFT groups.")
-	flag.Uint64Var(&config.MaxPendingCount, "sc", defaults.MaxPendingCount,
+	flag.Uint64("sc", defaults.MaxPendingCount,
 		"Max number of pending entries in wal after which snapshot is taken")
-	flag.BoolVar(&config.ExpandEdge, "expand_edge", defaults.ExpandEdge,
+	flag.Bool("expand_edge", defaults.ExpandEdge,
 		"Enables the expand() feature. This is very expensive for large data loads because it"+
 			" doubles the number of mutations going on in the system.")
 
-	flag.Float64Var(&config.AllottedMemory, "memory_mb", defaults.AllottedMemory,
+	flag.Float64("memory_mb", defaults.AllottedMemory,
 		"Estimated memory the process can take. "+
 			"Actual usage would be slightly more than specified here.")
 
-	flag.StringVar(&config.ConfigFile, "config", defaults.ConfigFile,
-		"YAML configuration file containing dgraph settings.")
-	flag.BoolVar(&config.DebugMode, "debugmode", defaults.DebugMode,
+	flag.Bool("debugmode", defaults.DebugMode,
 		"enable debug mode for more debug information")
 
 	// Useful for running multiple servers on the same machine.
-	flag.IntVarP(&x.Config.PortOffset, "port_offset", "o", 0,
+	flag.IntP("port_offset", "o", 0,
 		"Value added to all listening port numbers. [Internal=7080, HTTP=8080, Grpc=9080]")
 
-	flag.BoolVar(&bindall, "bindall", true,
+	flag.Bool("bindall", true,
 		"Use 0.0.0.0 instead of localhost to bind to all addresses on local machine.")
-	flag.BoolVar(&exposeTrace, "expose_trace", false,
+	flag.Bool("expose_trace", false,
 		"Allow trace endpoint to be accessible from remote")
 
 	// TLS configurations
-	x.SetTLSFlags(&tlsConf, flag)
-	flag.StringVar(&tlsConf.ClientAuth, "tls.client_auth", "", "Enable TLS client authentication")
-	flag.StringVar(&tlsConf.ClientCACerts, "tls.ca_certs", "", "CA Certs file path.")
+	x.RegisterTLSFlags(flag)
+	flag.String("tls_client_auth", "", "Enable TLS client authentication")
+	flag.String("tls_ca_certs", "", "CA Certs file path.")
 	tlsConf.ConfigType = x.TLSServerConfig
 
 	//Custom plugins.
-	flag.StringVar(&customTokenizers, "custom_tokenizers", "",
+	flag.String("custom_tokenizers", "",
 		"Comma separated list of tokenizer plugins")
 
 	// UI assets dir
-	flag.StringVar(&uiDir, "ui", "/usr/local/share/dgraph/assets",
+	flag.String("ui", "/usr/local/share/dgraph/assets",
 		"Directory which contains assets for the user interface")
-
-	// Read from config file before setting config.
-	if config.ConfigFile != "" {
-		x.Println("Loading configuration from file:", config.ConfigFile)
-		x.LoadConfigFromYAML(config.ConfigFile)
-	}
-	// Lets check version flag before we SetConfiguration because we validate AllottedMemory in
-	// SetConfiguration.
-	if x.Config.Version {
-		x.PrintVersionOnly()
-	}
 }
 
 func setupCustomTokenizers() {
+	customTokenizers := Server.Conf.GetString("custom_tokenizers")
 	if customTokenizers == "" {
 		return
 	}
@@ -286,16 +285,30 @@ func setupServer() {
 
 var sdCh chan os.Signal
 
-var ServerCmd = &cobra.Command{
-	Use:   "server",
-	Short: "Run Dgraph data server",
-	Long:  "Run Dgraph data server",
-	Run: func(cmd *cobra.Command, args []string) {
-		run()
-	},
-}
-
 func run() {
+	config := edgraph.Options{
+		PostingDir:          Server.Conf.GetString("postings"),
+		PostingTables:       Server.Conf.GetString("posting_tables"),
+		WALDir:              Server.Conf.GetString("wal"),
+		Nomutations:         Server.Conf.GetBool("nomutations"),
+		AllottedMemory:      Server.Conf.GetFloat64("memory_mb"),
+		ExportPath:          Server.Conf.GetString("export"),
+		NumPendingProposals: Server.Conf.GetInt("pending_proposals"),
+		Tracing:             Server.Conf.GetFloat64("trace"),
+		MyAddr:              Server.Conf.GetString("my"),
+		ZeroAddr:            Server.Conf.GetString("zero"),
+		RaftId:              uint64(Server.Conf.GetInt("idx")),
+		MaxPendingCount:     uint64(Server.Conf.GetInt("sc")),
+		ExpandEdge:          Server.Conf.GetBool("expand_edge"),
+		DebugMode:           Server.Conf.GetBool("debugmode"),
+	}
+	x.Config.PortOffset = Server.Conf.GetInt("port_offset")
+	bindall = Server.Conf.GetBool("bindall")
+	x.LoadTLSConfig(&tlsConf, Server.Conf)
+	tlsConf.ClientAuth = Server.Conf.GetString("tls_client_auth")
+	tlsConf.ClientCACerts = Server.Conf.GetString("tls_ca_certs")
+	uiDir = Server.Conf.GetString("ui")
+
 	edgraph.SetConfiguration(config)
 	setupCustomTokenizers()
 	x.Init(edgraph.Config.DebugMode)
@@ -305,7 +318,7 @@ func run() {
 		x.Check(edgraph.State.Dispose())
 	}()
 
-	if exposeTrace {
+	if Server.Conf.GetBool("expose_trace") {
 		trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
 			return true, true
 		}
diff --git a/dgraph/cmd/zero/run.go b/dgraph/cmd/zero/run.go
index e4ab94fbb986bed050e79751b5364ea3a09d1a57..00994c15d8bb7a6ae0f816ae9101269843e29e4d 100644
--- a/dgraph/cmd/zero/run.go
+++ b/dgraph/cmd/zero/run.go
@@ -54,19 +54,36 @@ type options struct {
 
 var opts options
 
+var Zero x.SubCommand
+
 func init() {
-	flag := ZeroCmd.Flags()
-	flag.BoolVar(&opts.bindall, "bindall", true,
+	Zero.Cmd = &cobra.Command{
+		Use:   "zero",
+		Short: "Run Dgraph zero server",
+		Long: `
+A Dgraph zero instance manages the Dgraph cluster.  Typically, a single Zero
+instance is sufficient for the cluster; however, one can run multiple Zero
+instances to achieve high-availability.
+`,
+		Run: func(cmd *cobra.Command, args []string) {
+			defer x.StartProfile(Zero.Conf).Stop()
+			run()
+		},
+	}
+	Zero.EnvPrefix = "DGRAPH_ZERO"
+
+	flag := Zero.Cmd.Flags()
+	flag.Bool("bindall", true,
 		"Use 0.0.0.0 instead of localhost to bind to all addresses on local machine.")
-	flag.StringVar(&opts.myAddr, "my", "",
+	flag.String("my", "",
 		"addr:port of this server, so other Dgraph servers can talk to this.")
-	flag.IntVarP(&opts.portOffset, "port_offset", "o", 0,
+	flag.IntP("port_offset", "o", 0,
 		"Value added to all listening port numbers. [Grpc=7080, HTTP=8080]")
-	flag.Uint64Var(&opts.nodeId, "idx", 1, "Unique node index for this server.")
-	flag.IntVar(&opts.numReplicas, "replicas", 1, "How many replicas to run per data shard."+
+	flag.Uint64("idx", 1, "Unique node index for this server.")
+	flag.Int("replicas", 1, "How many replicas to run per data shard."+
 		" The count includes the original shard.")
-	flag.StringVar(&opts.peer, "peer", "", "Address of another dgraphzero server.")
-	flag.StringVarP(&opts.w, "wal", "w", "zw", "Directory storing WAL.")
+	flag.String("peer", "", "Address of another dgraphzero server.")
+	flag.StringP("wal", "w", "zw", "Directory storing WAL.")
 }
 
 func setupListener(addr string, port int) (listener net.Listener, err error) {
@@ -185,20 +202,17 @@ func (st *state) getState(w http.ResponseWriter, r *http.Request) {
 	}
 }
 
-var ZeroCmd = &cobra.Command{
-	Use:   "zero",
-	Short: "Run Dgraph zero server",
-	Long: `
-A Dgraph zero instance manages the Dgraph cluster.  Typically, a single Zero
-instance is sufficient for the cluster; however, one can run multiple Zero
-instances to achieve high-availability.
-`,
-	Run: func(cmd *cobra.Command, args []string) {
-		run()
-	},
-}
-
 func run() {
+	opts = options{
+		bindall:     Zero.Conf.GetBool("bindall"),
+		myAddr:      Zero.Conf.GetString("my"),
+		portOffset:  Zero.Conf.GetInt("port_offset"),
+		nodeId:      uint64(Zero.Conf.GetInt("idx")),
+		numReplicas: Zero.Conf.GetInt("replicas"),
+		peer:        Zero.Conf.GetString("peer"),
+		w:           Zero.Conf.GetString("wal"),
+	}
+
 	grpc.EnableTracing = false
 
 	addr := "localhost"
diff --git a/edgraph/config.go b/edgraph/config.go
index 36357f97a966561380fc008990399bec19cee046..131b5a94e8b5389b1d3e5a3ae4c20a748aa59836 100644
--- a/edgraph/config.go
+++ b/edgraph/config.go
@@ -42,8 +42,7 @@ type Options struct {
 	MaxPendingCount     uint64
 	ExpandEdge          bool
 
-	ConfigFile string
-	DebugMode  bool
+	DebugMode bool
 }
 
 // TODO(tzdybal) - remove global
@@ -66,8 +65,7 @@ var DefaultConfig = Options{
 	MaxPendingCount:     1000,
 	ExpandEdge:          true,
 
-	ConfigFile: "",
-	DebugMode:  false,
+	DebugMode: false,
 }
 
 // Sometimes users use config.yaml flag so /debug/vars doesn't have information about the
@@ -130,7 +128,6 @@ func SetConfiguration(newConfig Options) {
 	worker.Config.MaxPendingCount = Config.MaxPendingCount
 	worker.Config.ExpandEdge = Config.ExpandEdge
 
-	x.Config.ConfigFile = Config.ConfigFile
 	x.Config.DebugMode = Config.DebugMode
 }
 
diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS
new file mode 100644
index 0000000000000000000000000000000000000000..0a5bf8f617ab2ac33186e24f07ad9714cc206982
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS
@@ -0,0 +1,46 @@
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# You can update this list using the following command:
+#
+#   $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
+
+# Please keep the list sorted.
+
+Adrien Bustany <adrien@bustany.org>
+Amit Krishnan <amit.krishnan@oracle.com>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Bruno Bigras <bigras.bruno@gmail.com>
+Caleb Spare <cespare@gmail.com>
+Case Nelson <case@teammating.com>
+Chris Howey <chris@howey.me> <howeyc@gmail.com>
+Christoffer Buchholz <christoffer.buchholz@gmail.com>
+Daniel Wagner-Hall <dawagner@gmail.com>
+Dave Cheney <dave@cheney.net>
+Evan Phoenix <evan@fallingsnow.net>
+Francisco Souza <f@souza.cc>
+Hari haran <hariharan.uno@gmail.com>
+John C Barstow
+Kelvin Fo <vmirage@gmail.com>
+Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
+Matt Layher <mdlayher@gmail.com>
+Nathan Youngman <git@nathany.com>
+Patrick <patrick@dropbox.com>
+Paul Hammond <paul@paulhammond.org>
+Pawel Knap <pawelknap88@gmail.com>
+Pieter Droogendijk <pieter@binky.org.uk>
+Pursuit92 <JoshChase@techpursuit.net>
+Riku Voipio <riku.voipio@linaro.org>
+Rob Figueiredo <robfig@gmail.com>
+Slawek Ligus <root@ooz.ie>
+Soge Zhang <zhssoge@gmail.com>
+Tiffany Jernigan <tiffany.jernigan@intel.com>
+Tilak Sharma <tilaks@google.com>
+Travis Cline <travis.cline@gmail.com>
+Tudor Golubenco <tudor.g@gmail.com>
+Yukang <moorekang@gmail.com>
+bronze1man <bronze1man@gmail.com>
+debrando <denis.brandolini@gmail.com>
+henrikedwards <henrik.edwards@gmail.com>
+铁哥 <guotie.9@gmail.com>
diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..8c732c1d85ca4e8b6b9e3ac719c84592ad9abbc2
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md
@@ -0,0 +1,307 @@
+# Changelog
+
+## v1.4.2 / 2016-10-10
+
+* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)
+
+## v1.4.1 / 2016-10-04
+
+* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)
+
+## v1.4.0 / 2016-10-01
+
+* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)
+
+## v1.3.1 / 2016-06-28
+
+* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)
+
+## v1.3.0 / 2016-04-19
+
+* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)
+
+## v1.2.10 / 2016-03-02
+
+* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)
+
+## v1.2.9 / 2016-01-13
+
+kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)
+
+## v1.2.8 / 2015-12-17
+
+* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
+* inotify: fix race in test
+* enable race detection for continuous integration (Linux, Mac, Windows)
+
+## v1.2.5 / 2015-10-17
+
+* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
+* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
+* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
+* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)
+
+## v1.2.1 / 2015-10-14
+
+* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)
+
+## v1.2.0 / 2015-02-08
+
+* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
+* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
+* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)
+
+## v1.1.1 / 2015-02-05
+
+* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)
+
+## v1.1.0 / 2014-12-12
+
+* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
+    * add low-level functions
+    * only need to store flags on directories
+    * less mutexes [#13](https://github.com/fsnotify/fsnotify/issues/13)
+    * done can be an unbuffered channel
+    * remove calls to os.NewSyscallError
+* More efficient string concatenation for Event.String() [#52](https://github.com/fsnotify/fsnotify/pull/52) (thanks @mdlayher)
+* kqueue: fix regression in  rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v1.0.4 / 2014-09-07
+
+* kqueue: add dragonfly to the build tags.
+* Rename source code files, rearrange code so exported APIs are at the top.
+* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)
+
+## v1.0.3 / 2014-08-19
+
+* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)
+
+## v1.0.2 / 2014-08-17
+
+* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+* [Fix] Make ./path and path equivalent. (thanks @zhsso)
+
+## v1.0.0 / 2014-08-15
+
+* [API] Remove AddWatch on Windows, use Add.
+* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
+* Minor updates based on feedback from golint.
+
+## dev / 2014-07-09
+
+* Moved to [github.com/fsnotify/fsnotify](https://github.com/fsnotify/fsnotify).
+* Use os.NewSyscallError instead of returning errno (thanks @hariharan-uno)
+
+## dev / 2014-07-04
+
+* kqueue: fix incorrect mutex used in Close()
+* Update example to demonstrate usage of Op.
+
+## dev / 2014-06-28
+
+* [API] Don't set the Write Op for attribute notifications [#4](https://github.com/fsnotify/fsnotify/issues/4)
+* Fix for String() method on Event (thanks Alex Brainman)
+* Don't build on Plan 9 or Solaris (thanks @4ad)
+
+## dev / 2014-06-21
+
+* Events channel of type Event rather than *Event.
+* [internal] use syscall constants directly for inotify and kqueue.
+* [internal] kqueue: rename events to kevents and fileEvent to event.
+
+## dev / 2014-06-19
+
+* Go 1.3+ required on Windows (uses syscall.ERROR_MORE_DATA internally).
+* [internal] remove cookie from Event struct (unused).
+* [internal] Event struct has the same definition across every OS.
+* [internal] remove internal watch and removeWatch methods.
+
+## dev / 2014-06-12
+
+* [API] Renamed Watch() to Add() and RemoveWatch() to Remove().
+* [API] Pluralized channel names: Events and Errors.
+* [API] Renamed FileEvent struct to Event.
+* [API] Op constants replace methods like IsCreate().
+
+## dev / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## dev / 2014-05-23
+
+* [API] Remove current implementation of WatchFlags.
+    * current implementation doesn't take advantage of OS for efficiency
+    * provides little benefit over filtering events as they are received, but has  extra bookkeeping and mutexes
+    * no tests for the current implementation
+    * not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)
+
+## v0.9.3 / 2014-12-31
+
+* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)
+
+## v0.9.2 / 2014-08-17
+
+* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
+
+## v0.9.1 / 2014-06-12
+
+* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)
+
+## v0.9.0 / 2014-01-17
+
+* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
+* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
+* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.
+
+## v0.8.12 / 2013-11-13
+
+* [API] Remove FD_SET and friends from Linux adapter
+
+## v0.8.11 / 2013-11-02
+
+* [Doc] Add Changelog [#72][] (thanks @nathany)
+* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)
+
+## v0.8.10 / 2013-10-19
+
+* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
+* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
+* [Doc] specify OS-specific limits in README (thanks @debrando)
+
+## v0.8.9 / 2013-09-08
+
+* [Doc] Contributing (thanks @nathany)
+* [Doc] update package path in example code [#63][] (thanks @paulhammond)
+* [Doc] GoCI badge in README (Linux only) [#60][]
+* [Doc] Cross-platform testing with Vagrant  [#59][] (thanks @nathany)
+
+## v0.8.8 / 2013-06-17
+
+* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)
+
+## v0.8.7 / 2013-06-03
+
+* [API] Make syscall flags internal
+* [Fix] inotify: ignore event changes
+* [Fix] race in symlink test [#45][] (reported by @srid)
+* [Fix] tests on Windows
+* lower case error messages
+
+## v0.8.6 / 2013-05-23
+
+* kqueue: Use EVT_ONLY flag on Darwin
+* [Doc] Update README with full example
+
+## v0.8.5 / 2013-05-09
+
+* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)
+
+## v0.8.4 / 2013-04-07
+
+* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)
+
+## v0.8.3 / 2013-03-13
+
+* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
+* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)
+
+## v0.8.2 / 2013-02-07
+
+* [Doc] add Authors
+* [Fix] fix data races for map access [#29][] (thanks @fsouza)
+
+## v0.8.1 / 2013-01-09
+
+* [Fix] Windows path separators
+* [Doc] BSD License
+
+## v0.8.0 / 2012-11-09
+
+* kqueue: directory watching improvements (thanks @vmirage)
+* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
+* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)
+
+## v0.7.4 / 2012-10-09
+
+* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
+* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
+* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
+* [Fix] kqueue: modify after recreation of file
+
+## v0.7.3 / 2012-09-27
+
+* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
+* [Fix] kqueue: no longer get duplicate CREATE events
+
+## v0.7.2 / 2012-09-01
+
+* kqueue: events for created directories
+
+## v0.7.1 / 2012-07-14
+
+* [Fix] for renaming files
+
+## v0.7.0 / 2012-07-02
+
+* [Feature] FSNotify flags
+* [Fix] inotify: Added file name back to event path
+
+## v0.6.0 / 2012-06-06
+
+* kqueue: watch files after directory created (thanks @tmc)
+
+## v0.5.1 / 2012-05-22
+
+* [Fix] inotify: remove all watches before Close()
+
+## v0.5.0 / 2012-05-03
+
+* [API] kqueue: return errors during watch instead of sending over channel
+* kqueue: match symlink behavior on Linux
+* inotify: add `DELETE_SELF` (requested by @taralx)
+* [Fix] kqueue: handle EINTR (reported by @robfig)
+* [Doc] Godoc example [#1][] (thanks @davecheney)
+
+## v0.4.0 / 2012-03-30
+
+* Go 1 released: build with go tool
+* [Feature] Windows support using winfsnotify
+* Windows does not have attribute change notifications
+* Roll attribute notifications into IsModify
+
+## v0.3.0 / 2012-02-19
+
+* kqueue: add files when watch directory
+
+## v0.2.0 / 2011-12-30
+
+* update to latest Go weekly code
+
+## v0.1.0 / 2011-10-19
+
+* kqueue: add watch on file creation to match inotify
+* kqueue: create file event
+* inotify: ignore `IN_IGNORED` events
+* event String()
+* linux: common FileEvent functions
+* initial commit
+
+[#79]: https://github.com/howeyc/fsnotify/pull/79
+[#77]: https://github.com/howeyc/fsnotify/pull/77
+[#72]: https://github.com/howeyc/fsnotify/issues/72
+[#71]: https://github.com/howeyc/fsnotify/issues/71
+[#70]: https://github.com/howeyc/fsnotify/issues/70
+[#63]: https://github.com/howeyc/fsnotify/issues/63
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#60]: https://github.com/howeyc/fsnotify/issues/60
+[#59]: https://github.com/howeyc/fsnotify/issues/59
+[#49]: https://github.com/howeyc/fsnotify/issues/49
+[#45]: https://github.com/howeyc/fsnotify/issues/45
+[#40]: https://github.com/howeyc/fsnotify/issues/40
+[#36]: https://github.com/howeyc/fsnotify/issues/36
+[#33]: https://github.com/howeyc/fsnotify/issues/33
+[#29]: https://github.com/howeyc/fsnotify/issues/29
+[#25]: https://github.com/howeyc/fsnotify/issues/25
+[#24]: https://github.com/howeyc/fsnotify/issues/24
+[#21]: https://github.com/howeyc/fsnotify/issues/21
diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..828a60b24ba265b9dd88b15b0d942ed9e780ed90
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+## Issues
+
+* Request features and report bugs using the [GitHub Issue Tracker](https://github.com/fsnotify/fsnotify/issues).
+* Please indicate the platform you are using fsnotify on.
+* A code example to reproduce the problem is appreciated.
+
+## Pull Requests
+
+### Contributor License Agreement
+
+fsnotify is derived from code in the [golang.org/x/exp](https://godoc.org/golang.org/x/exp) package and it may be included [in the standard library](https://github.com/fsnotify/fsnotify/issues/1) in the future. Therefore fsnotify carries the same [LICENSE](https://github.com/fsnotify/fsnotify/blob/master/LICENSE) as Go. Contributors retain their copyright, so you need to fill out a short form before we can accept your contribution: [Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual).
+
+Please indicate that you have signed the CLA in your pull request.
+
+### How fsnotify is Developed
+
+* Development is done on feature branches.
+* Tests are run on BSD, Linux, macOS and Windows.
+* Pull requests are reviewed and [applied to master][am] using [hub][].
+  * Maintainers may modify or squash commits rather than asking contributors to.
+* To issue a new release, the maintainers will:
+  * Update the CHANGELOG
+  * Tag a version, which will become available through gopkg.in.
+ 
+### How to Fork
+
+For smooth sailing, always use the original import path. Installing with `go get` makes this easy. 
+
+1. Install from GitHub (`go get -u github.com/fsnotify/fsnotify`)
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Ensure everything works and the tests pass (see below)
+4. Commit your changes (`git commit -am 'Add some feature'`)
+
+Contribute upstream:
+
+1. Fork fsnotify on GitHub
+2. Add your remote (`git remote add fork git@github.com:mycompany/repo.git`)
+3. Push to the branch (`git push fork my-new-feature`)
+4. Create a new Pull Request on GitHub
+
+This workflow is [thoroughly explained by Katrina Owen](https://splice.com/blog/contributing-open-source-git-repositories-go/).
+
+### Testing
+
+fsnotify uses build tags to compile different code on Linux, BSD, macOS, and Windows.
+
+Before doing a pull request, please do your best to test your changes on multiple platforms, and list which platforms you were able/unable to test on.
+
+To aid in cross-platform testing there is a Vagrantfile for Linux and BSD.
+
+* Install [Vagrant](http://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/)
+* Setup [Vagrant Gopher](https://github.com/nathany/vagrant-gopher) in your `src` folder.
+* Run `vagrant up` from the project folder. You can also setup just one box with `vagrant up linux` or `vagrant up bsd` (note: the BSD box doesn't support Windows hosts at this time, and NFS may prompt for your host OS password)
+* Once setup, you can run the test suite on a given OS with a single command `vagrant ssh linux -c 'cd fsnotify/fsnotify; go test'`.
+* When you're done, you will want to halt or destroy the Vagrant boxes.
+
+Notice: fsnotify file system events won't trigger in shared folders. The tests get around this limitation by using the /tmp directory.
+
+Right now there is no equivalent solution for Windows and macOS, but there are Windows VMs [freely available from Microsoft](http://www.modern.ie/en-us/virtualization-tools#downloads).
+
+### Maintainers
+
+Help maintaining fsnotify is welcome. To be a maintainer:
+
+* Submit a pull request and sign the CLA as above.
+* You must be able to run the test suite on Mac, Windows, Linux and BSD.
+
+To keep master clean, the fsnotify project uses the "apply mail" workflow outlined in Nathaniel Talbott's post ["Merge pull request" Considered Harmful][am]. This requires installing [hub][].
+
+All code changes should be internal pull requests.
+
+Releases are tagged using [Semantic Versioning](http://semver.org/).
+
+[hub]: https://github.com/github/hub
+[am]: http://blog.spreedly.com/2014/06/24/merge-pull-request-considered-harmful/#.VGa5yZPF_Zs
diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f21e54080090f80a47395dcacc8245166da32723
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+Copyright (c) 2012 fsnotify Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3993207413a75f9a1b366517b9d054ba4dd61743
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/README.md
@@ -0,0 +1,79 @@
+# File system notifications for Go
+
+[![GoDoc](https://godoc.org/github.com/fsnotify/fsnotify?status.svg)](https://godoc.org/github.com/fsnotify/fsnotify) [![Go Report Card](https://goreportcard.com/badge/github.com/fsnotify/fsnotify)](https://goreportcard.com/report/github.com/fsnotify/fsnotify)
+
+fsnotify utilizes [golang.org/x/sys](https://godoc.org/golang.org/x/sys) rather than `syscall` from the standard library. Ensure you have the latest version installed by running:
+
+```console
+go get -u golang.org/x/sys/...
+```
+
+Cross platform: Windows, Linux, BSD and macOS.
+
+|Adapter   |OS        |Status    |
+|----------|----------|----------|
+|inotify   |Linux 2.6.27 or later, Android\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|kqueue    |BSD, macOS, iOS\*|Supported [![Build Status](https://travis-ci.org/fsnotify/fsnotify.svg?branch=master)](https://travis-ci.org/fsnotify/fsnotify)|
+|ReadDirectoryChangesW|Windows|Supported [![Build status](https://ci.appveyor.com/api/projects/status/ivwjubaih4r0udeh/branch/master?svg=true)](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
+|FSEvents  |macOS         |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
+|FEN       |Solaris 11    |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
+|fanotify  |Linux 2.6.37+ | |
+|USN Journals |Windows    |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
+|Polling   |*All*         |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
+
+\* Android and iOS are untested.
+
+Please see [the documentation](https://godoc.org/github.com/fsnotify/fsnotify) and consult the [FAQ](#faq) for usage information.
+
+## API stability
+
+fsnotify is a fork of [howeyc/fsnotify](https://godoc.org/github.com/howeyc/fsnotify) with a new API as of v1.0. The API is based on [this design document](http://goo.gl/MrYxyA). 
+
+All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based on [Semantic Versioning](http://semver.org/). Further API changes are [planned](https://github.com/fsnotify/fsnotify/milestones), and will be tagged with a new major revision number.
+
+Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.
+
+## Contributing
+
+Please refer to [CONTRIBUTING][] before opening an issue or pull request.
+
+## Example
+
+See [example_test.go](https://github.com/fsnotify/fsnotify/blob/master/example_test.go).
+
+## FAQ
+
+**When a file is moved to another directory is it still being watched?**
+
+No (it shouldn't be, unless you are watching where it was moved to).
+
+**When I watch a directory, are all subdirectories watched as well?**
+
+No, you must add watches for any directory you want to watch (a recursive watcher is on the roadmap [#18][]).
+
+**Do I have to watch the Error and Event channels in a separate goroutine?**
+
+As of now, yes. Looking into making this single-thread friendly (see [howeyc #7][#7])
+
+**Why am I receiving multiple events for the same file on OS X?**
+
+Spotlight indexing on OS X can result in multiple events (see [howeyc #62][#62]). A temporary workaround is to add your folder(s) to the *Spotlight Privacy settings* until we have a native FSEvents implementation (see [#11][]).
+
+**How many files can be watched at once?**
+
+There are OS-specific limits as to how many watches can be created:
+* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
+* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.
+
+[#62]: https://github.com/howeyc/fsnotify/issues/62
+[#18]: https://github.com/fsnotify/fsnotify/issues/18
+[#11]: https://github.com/fsnotify/fsnotify/issues/11
+[#7]: https://github.com/howeyc/fsnotify/issues/7
+
+[contributing]: https://github.com/fsnotify/fsnotify/blob/master/CONTRIBUTING.md
+
+## Related Projects
+
+* [notify](https://github.com/rjeczalik/notify)
+* [fsevents](https://github.com/fsnotify/fsevents)
+
diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go
new file mode 100644
index 0000000000000000000000000000000000000000..ced39cb881e6836902c6c260162f88407a1392dc
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fen.go
@@ -0,0 +1,37 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package fsnotify
+
+import (
+	"errors"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	return nil, errors.New("FEN based watcher not yet supported for fsnotify\n")
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	return nil
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
new file mode 100644
index 0000000000000000000000000000000000000000..190bf0de575629521ae129b755007ca5cc874845
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go
@@ -0,0 +1,66 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9
+
+// Package fsnotify provides a platform-independent interface for file system notifications.
+package fsnotify
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+)
+
+// Event represents a single file system notification.
+type Event struct {
+	Name string // Relative path to the file or directory.
+	Op   Op     // File operation that triggered the event.
+}
+
+// Op describes a set of file operations.
+type Op uint32
+
+// These are the generalized file operations that can trigger a notification.
+const (
+	Create Op = 1 << iota
+	Write
+	Remove
+	Rename
+	Chmod
+)
+
+func (op Op) String() string {
+	// Use a buffer for efficient string concatenation
+	var buffer bytes.Buffer
+
+	if op&Create == Create {
+		buffer.WriteString("|CREATE")
+	}
+	if op&Remove == Remove {
+		buffer.WriteString("|REMOVE")
+	}
+	if op&Write == Write {
+		buffer.WriteString("|WRITE")
+	}
+	if op&Rename == Rename {
+		buffer.WriteString("|RENAME")
+	}
+	if op&Chmod == Chmod {
+		buffer.WriteString("|CHMOD")
+	}
+	if buffer.Len() == 0 {
+		return ""
+	}
+	return buffer.String()[1:] // Strip leading pipe
+}
+
+// String returns a string representation of the event in the form
+// "file: REMOVE|WRITE|..."
+func (e Event) String() string {
+	return fmt.Sprintf("%q: %s", e.Name, e.Op.String())
+}
+
+// Common errors that can be reported by a watcher
+var ErrEventOverflow = errors.New("fsnotify queue overflow")
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9fd1b88a05f297b4e7324643802274738510031
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -0,0 +1,337 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	mu       sync.Mutex // Map access
+	fd       int
+	poller   *fdPoller
+	watches  map[string]*watch // Map of inotify watches (key: path)
+	paths    map[int]string    // Map of watched paths (key: watch descriptor)
+	done     chan struct{}     // Channel for sending a "quit message" to the reader goroutine
+	doneResp chan struct{}     // Channel to respond to Close
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	// Create inotify fd
+	fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC)
+	if fd == -1 {
+		return nil, errno
+	}
+	// Create epoll
+	poller, err := newFdPoller(fd)
+	if err != nil {
+		unix.Close(fd)
+		return nil, err
+	}
+	w := &Watcher{
+		fd:       fd,
+		poller:   poller,
+		watches:  make(map[string]*watch),
+		paths:    make(map[int]string),
+		Events:   make(chan Event),
+		Errors:   make(chan error),
+		done:     make(chan struct{}),
+		doneResp: make(chan struct{}),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+func (w *Watcher) isClosed() bool {
+	select {
+	case <-w.done:
+		return true
+	default:
+		return false
+	}
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed() {
+		return nil
+	}
+
+	// Send 'close' signal to goroutine, and set the Watcher to closed.
+	close(w.done)
+
+	// Wake up goroutine
+	w.poller.wake()
+
+	// Wait for goroutine to close
+	<-w.doneResp
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	name = filepath.Clean(name)
+	if w.isClosed() {
+		return errors.New("inotify instance already closed")
+	}
+
+	const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM |
+		unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY |
+		unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF
+
+	var flags uint32 = agnosticEvents
+
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watchEntry := w.watches[name]
+	if watchEntry != nil {
+		flags |= watchEntry.flags | unix.IN_MASK_ADD
+	}
+	wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
+	if wd == -1 {
+		return errno
+	}
+
+	if watchEntry == nil {
+		w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+		w.paths[wd] = name
+	} else {
+		watchEntry.wd = uint32(wd)
+		watchEntry.flags = flags
+	}
+
+	return nil
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+
+	// Fetch the watch.
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	watch, ok := w.watches[name]
+
+	// Remove it from inotify.
+	if !ok {
+		return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
+	}
+
+	// We successfully removed the watch if InotifyRmWatch doesn't return an
+	// error; now we need to clean up our internal state to ensure it matches
+	// inotify's kernel state.
+	delete(w.paths, int(watch.wd))
+	delete(w.watches, name)
+
+	// inotify_rm_watch will return EINVAL if the file has been deleted;
+	// the inotify watch will already have been removed.
+	// watches and paths are deleted implicitly and asynchronously: the
+	// readEvents() goroutine receives IN_IGNORED after the inotify_rm_watch()
+	// below, so EINVAL means that the wd is being rm_watch()ed or its file was
+	// removed by another thread and we have not yet received the IN_IGNORED event.
+	success, errno := unix.InotifyRmWatch(w.fd, watch.wd)
+	if success == -1 {
+		// TODO: Perhaps it's not helpful to return an error here in every case.
+		// the only two possible errors are:
+		// EBADF, which happens when w.fd is not a valid file descriptor of any kind.
+		// EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor.
+		// Watch descriptors are invalidated when they are removed explicitly or implicitly;
+		// explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted.
+		return errno
+	}
+
+	return nil
+}
+
+type watch struct {
+	wd    uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall)
+	flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags)
+}
+
+// readEvents reads from the inotify file descriptor, converts the
+// received events into Event objects and sends them via the Events channel
+func (w *Watcher) readEvents() {
+	var (
+		buf   [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events
+		n     int                                  // Number of bytes read with read()
+		errno error                                // Syscall errno
+		ok    bool                                 // For poller.wait
+	)
+
+	defer close(w.doneResp)
+	defer close(w.Errors)
+	defer close(w.Events)
+	defer unix.Close(w.fd)
+	defer w.poller.close()
+
+	for {
+		// See if we have been closed.
+		if w.isClosed() {
+			return
+		}
+
+		ok, errno = w.poller.wait()
+		if errno != nil {
+			select {
+			case w.Errors <- errno:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		if !ok {
+			continue
+		}
+
+		n, errno = unix.Read(w.fd, buf[:])
+		// If a signal interrupted execution, see if we've been asked to close, and try again.
+		// http://man7.org/linux/man-pages/man7/signal.7.html :
+		// "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable"
+		if errno == unix.EINTR {
+			continue
+		}
+
+		// unix.Read might have been woken up by Close. If so, we're done.
+		if w.isClosed() {
+			return
+		}
+
+		if n < unix.SizeofInotifyEvent {
+			var err error
+			if n == 0 {
+				// EOF was received; this should really never happen.
+				err = io.EOF
+			} else if n < 0 {
+				// If an error occurred while reading.
+				err = errno
+			} else {
+				// Read was too short.
+				err = errors.New("notify: short read in readEvents()")
+			}
+			select {
+			case w.Errors <- err:
+			case <-w.done:
+				return
+			}
+			continue
+		}
+
+		var offset uint32
+		// We don't know how many events we just read into the buffer.
+		// While the offset points to at least one whole event...
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Point "raw" to the event in the buffer
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+
+			mask := uint32(raw.Mask)
+			nameLen := uint32(raw.Len)
+
+			if mask&unix.IN_Q_OVERFLOW != 0 {
+				select {
+				case w.Errors <- ErrEventOverflow:
+				case <-w.done:
+					return
+				}
+			}
+
+			// If the event happened to the watched directory or the watched file, the kernel
+			// doesn't append the filename to the event, but we would like to always
+			// fill the "Name" field with a valid filename. We retrieve the path of the watch from
+			// the "paths" map.
+			w.mu.Lock()
+			name, ok := w.paths[int(raw.Wd)]
+			// IN_DELETE_SELF occurs when the file/directory being watched is removed.
+			// This is a sign to clean up the maps, otherwise we are no longer in sync
+			// with the inotify kernel state which has already deleted the watch
+			// automatically.
+			if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+				delete(w.paths, int(raw.Wd))
+				delete(w.watches, name)
+			}
+			w.mu.Unlock()
+
+			if nameLen > 0 {
+				// Point "bytes" at the first byte of the filename
+				bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
+				// The filename is padded with NULL bytes. TrimRight() gets rid of those.
+				name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
+			}
+
+			event := newEvent(name, mask)
+
+			// Send the events that are not ignored on the events channel
+			if !event.ignoreLinux(mask) {
+				select {
+				case w.Events <- event:
+				case <-w.done:
+					return
+				}
+			}
+
+			// Move to the next event in the buffer
+			offset += unix.SizeofInotifyEvent + nameLen
+		}
+	}
+}
+
+// Certain types of events can be "ignored" and not sent over the Events
+// channel. Such as events marked ignore by the kernel, or MODIFY events
+// against files that do not exist.
+func (e *Event) ignoreLinux(mask uint32) bool {
+	// Ignore anything the inotify API says to ignore
+	if mask&unix.IN_IGNORED == unix.IN_IGNORED {
+		return true
+	}
+
+	// If the event is not a DELETE or RENAME, the file must exist.
+	// Otherwise the event is ignored.
+	// *Note*: this was put in place because it was seen that a MODIFY
+	// event was sent after the DELETE. This ignores that MODIFY and
+	// assumes a DELETE will come or has come if the file doesn't exist.
+	if !(e.Op&Remove == Remove || e.Op&Rename == Rename) {
+		_, statErr := os.Lstat(e.Name)
+		return os.IsNotExist(statErr)
+	}
+	return false
+}
+
+// newEvent returns a platform-independent Event based on an inotify mask.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO {
+		e.Op |= Create
+	}
+	if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.IN_MODIFY == unix.IN_MODIFY {
+		e.Op |= Write
+	}
+	if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM {
+		e.Op |= Rename
+	}
+	if mask&unix.IN_ATTRIB == unix.IN_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
new file mode 100644
index 0000000000000000000000000000000000000000..cc7db4b22ef5bdaf81c5cc629d3062fcf544e202
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package fsnotify
+
+import (
+	"errors"
+
+	"golang.org/x/sys/unix"
+)
+
+type fdPoller struct {
+	fd   int    // File descriptor (as returned by the inotify_init() syscall)
+	epfd int    // Epoll file descriptor
+	pipe [2]int // Pipe for waking up
+}
+
+func emptyPoller(fd int) *fdPoller {
+	poller := new(fdPoller)
+	poller.fd = fd
+	poller.epfd = -1
+	poller.pipe[0] = -1
+	poller.pipe[1] = -1
+	return poller
+}
+
+// Create a new inotify poller.
+// This creates an inotify handler, and an epoll handler.
+func newFdPoller(fd int) (*fdPoller, error) {
+	var errno error
+	poller := emptyPoller(fd)
+	defer func() {
+		if errno != nil {
+			poller.close()
+		}
+	}()
+	poller.fd = fd
+
+	// Create epoll fd
+	poller.epfd, errno = unix.EpollCreate1(0)
+	if poller.epfd == -1 {
+		return nil, errno
+	}
+	// Create pipe; pipe[0] is the read end, pipe[1] the write end.
+	errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register inotify fd with epoll
+	event := unix.EpollEvent{
+		Fd:     int32(poller.fd),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	// Register pipe fd with epoll
+	event = unix.EpollEvent{
+		Fd:     int32(poller.pipe[0]),
+		Events: unix.EPOLLIN,
+	}
+	errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event)
+	if errno != nil {
+		return nil, errno
+	}
+
+	return poller, nil
+}
+
+// Wait using epoll.
+// Returns true if something is ready to be read,
+// false if there is not.
+func (poller *fdPoller) wait() (bool, error) {
+	// 3 possible events per fd, and 2 fds, makes a maximum of 6 events.
+	// I don't know whether epoll_wait returns the number of events returned,
+	// or the total number of events ready.
+	// I decided to catch both by making the buffer one larger than the maximum.
+	events := make([]unix.EpollEvent, 7)
+	for {
+		n, errno := unix.EpollWait(poller.epfd, events, -1)
+		if n == -1 {
+			if errno == unix.EINTR {
+				continue
+			}
+			return false, errno
+		}
+		if n == 0 {
+			// If there are no events, try again.
+			continue
+		}
+		if n > 6 {
+			// This should never happen. More events were returned than should be possible.
+			return false, errors.New("epoll_wait returned more events than I know what to do with")
+		}
+		ready := events[:n]
+		epollhup := false
+		epollerr := false
+		epollin := false
+		for _, event := range ready {
+			if event.Fd == int32(poller.fd) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// This should not happen, but if it does, treat it as a wakeup.
+					epollhup = true
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the file descriptor, we should pretend
+					// something is ready to read, and let unix.Read pick up the error.
+					epollerr = true
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// There is data to read.
+					epollin = true
+				}
+			}
+			if event.Fd == int32(poller.pipe[0]) {
+				if event.Events&unix.EPOLLHUP != 0 {
+					// Write pipe descriptor was closed, by us. This means we're closing down the
+					// watcher, and we should wake up.
+				}
+				if event.Events&unix.EPOLLERR != 0 {
+					// If an error is waiting on the pipe file descriptor.
+					// This is an absolute mystery, and should never ever happen.
+					return false, errors.New("Error on the pipe descriptor.")
+				}
+				if event.Events&unix.EPOLLIN != 0 {
+					// This is a regular wakeup, so we have to clear the buffer.
+					err := poller.clearWake()
+					if err != nil {
+						return false, err
+					}
+				}
+			}
+		}
+
+		if epollhup || epollerr || epollin {
+			return true, nil
+		}
+		return false, nil
+	}
+}
+
+// Wake up the poller by writing to the wakeup pipe.
+func (poller *fdPoller) wake() error {
+	buf := make([]byte, 1)
+	n, errno := unix.Write(poller.pipe[1], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is full, poller will wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+func (poller *fdPoller) clearWake() error {
+	// You have to be woken up a LOT in order to get to 100!
+	buf := make([]byte, 100)
+	n, errno := unix.Read(poller.pipe[0], buf)
+	if n == -1 {
+		if errno == unix.EAGAIN {
+			// Buffer is empty, someone else cleared our wake.
+			return nil
+		}
+		return errno
+	}
+	return nil
+}
+
+// Close all poller file descriptors, but not the inotify fd that was passed in.
+func (poller *fdPoller) close() {
+	if poller.pipe[1] != -1 {
+		unix.Close(poller.pipe[1])
+	}
+	if poller.pipe[0] != -1 {
+		unix.Close(poller.pipe[0])
+	}
+	if poller.epfd != -1 {
+		unix.Close(poller.epfd)
+	}
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go
new file mode 100644
index 0000000000000000000000000000000000000000..c2b4acb18ddf28874ca3ff4d2ef34076b9c253a3
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go
@@ -0,0 +1,503 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly darwin
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events chan Event
+	Errors chan error
+	done   chan bool // Channel for sending a "quit message" to the reader goroutine
+
+	kq int // File descriptor (as returned by the kqueue() syscall).
+
+	mu              sync.Mutex        // Protects access to watcher data
+	watches         map[string]int    // Map of watched file descriptors (key: path).
+	externalWatches map[string]bool   // Map of watches added by user of the library.
+	dirFlags        map[string]uint32 // Map of watched directories to fflags used in kqueue.
+	paths           map[int]pathInfo  // Map file descriptors to path names for processing kqueue events.
+	fileExists      map[string]bool   // Keep track of if we know this file exists (to stop duplicate create events).
+	isClosed        bool              // Set to true when Close() is first called
+}
+
+type pathInfo struct {
+	name  string
+	isDir bool
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	kq, err := kqueue()
+	if err != nil {
+		return nil, err
+	}
+
+	w := &Watcher{
+		kq:              kq,
+		watches:         make(map[string]int),
+		dirFlags:        make(map[string]uint32),
+		paths:           make(map[int]pathInfo),
+		fileExists:      make(map[string]bool),
+		externalWatches: make(map[string]bool),
+		Events:          make(chan Event),
+		Errors:          make(chan error),
+		done:            make(chan bool),
+	}
+
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return nil
+	}
+	w.isClosed = true
+	w.mu.Unlock()
+
+	// copy paths to remove while locked
+	w.mu.Lock()
+	var pathsToRemove = make([]string, 0, len(w.watches))
+	for name := range w.watches {
+		pathsToRemove = append(pathsToRemove, name)
+	}
+	w.mu.Unlock()
+	// unlock before calling Remove, which also locks
+
+	var err error
+	for _, name := range pathsToRemove {
+		if e := w.Remove(name); e != nil && err == nil {
+			err = e
+		}
+	}
+
+	// Send "quit" message to the reader goroutine:
+	w.done <- true
+
+	return nil
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	w.mu.Lock()
+	w.externalWatches[name] = true
+	w.mu.Unlock()
+	_, err := w.addWatch(name, noteAllEvents)
+	return err
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	name = filepath.Clean(name)
+	w.mu.Lock()
+	watchfd, ok := w.watches[name]
+	w.mu.Unlock()
+	if !ok {
+		return fmt.Errorf("can't remove non-existent kevent watch for: %s", name)
+	}
+
+	const registerRemove = unix.EV_DELETE
+	if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil {
+		return err
+	}
+
+	unix.Close(watchfd)
+
+	w.mu.Lock()
+	isDir := w.paths[watchfd].isDir
+	delete(w.watches, name)
+	delete(w.paths, watchfd)
+	delete(w.dirFlags, name)
+	w.mu.Unlock()
+
+	// Find all watched paths that are in this directory that are not external.
+	if isDir {
+		var pathsToRemove []string
+		w.mu.Lock()
+		for _, path := range w.paths {
+			wdir, _ := filepath.Split(path.name)
+			if filepath.Clean(wdir) == name {
+				if !w.externalWatches[path.name] {
+					pathsToRemove = append(pathsToRemove, path.name)
+				}
+			}
+		}
+		w.mu.Unlock()
+		for _, name := range pathsToRemove {
+			// Since these are internal, not much sense in propagating error
+			// to the user, as that will just confuse them with an error about
+			// a path they did not explicitly watch themselves.
+			w.Remove(name)
+		}
+	}
+
+	return nil
+}
+
+// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE)
+const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME
+
+// keventWaitTime to block on each read from kevent
+var keventWaitTime = durationToTimespec(100 * time.Millisecond)
+
+// addWatch adds name to the watched file set.
+// The flags are interpreted as described in kevent(2).
+// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks.
+func (w *Watcher) addWatch(name string, flags uint32) (string, error) {
+	var isDir bool
+	// Make ./name and name equivalent
+	name = filepath.Clean(name)
+
+	w.mu.Lock()
+	if w.isClosed {
+		w.mu.Unlock()
+		return "", errors.New("kevent instance already closed")
+	}
+	watchfd, alreadyWatching := w.watches[name]
+	// We already have a watch, but we can still override flags.
+	if alreadyWatching {
+		isDir = w.paths[watchfd].isDir
+	}
+	w.mu.Unlock()
+
+	if !alreadyWatching {
+		fi, err := os.Lstat(name)
+		if err != nil {
+			return "", err
+		}
+
+		// Don't watch sockets.
+		if fi.Mode()&os.ModeSocket == os.ModeSocket {
+			return "", nil
+		}
+
+		// Don't watch named pipes.
+		if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe {
+			return "", nil
+		}
+
+		// Follow Symlinks
+		// Unfortunately, Linux can add bogus symlinks to watch list without
+		// issue, and Windows can't do symlinks period (AFAIK). To maintain
+		// consistency, we will act like everything is fine. There will simply
+		// be no file events for broken symlinks.
+		// Hence the returns of nil on errors.
+		if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
+			name, err = filepath.EvalSymlinks(name)
+			if err != nil {
+				return "", nil
+			}
+
+			w.mu.Lock()
+			_, alreadyWatching = w.watches[name]
+			w.mu.Unlock()
+
+			if alreadyWatching {
+				return name, nil
+			}
+
+			fi, err = os.Lstat(name)
+			if err != nil {
+				return "", nil
+			}
+		}
+
+		watchfd, err = unix.Open(name, openMode, 0700)
+		if watchfd == -1 {
+			return "", err
+		}
+
+		isDir = fi.IsDir()
+	}
+
+	const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE
+	if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil {
+		unix.Close(watchfd)
+		return "", err
+	}
+
+	if !alreadyWatching {
+		w.mu.Lock()
+		w.watches[name] = watchfd
+		w.paths[watchfd] = pathInfo{name: name, isDir: isDir}
+		w.mu.Unlock()
+	}
+
+	if isDir {
+		// Watch the directory if it has not been watched before,
+		// or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles)
+		w.mu.Lock()
+
+		watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE &&
+			(!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE)
+		// Store flags so this watch can be updated later
+		w.dirFlags[name] = flags
+		w.mu.Unlock()
+
+		if watchDir {
+			if err := w.watchDirectoryFiles(name); err != nil {
+				return "", err
+			}
+		}
+	}
+	return name, nil
+}
+
+// readEvents reads from kqueue and converts the received kevents into
+// Event values that it sends down the Events channel.
+func (w *Watcher) readEvents() {
+	eventBuffer := make([]unix.Kevent_t, 10)
+
+	for {
+		// See if there is a message on the "done" channel
+		select {
+		case <-w.done:
+			err := unix.Close(w.kq)
+			if err != nil {
+				w.Errors <- err
+			}
+			close(w.Events)
+			close(w.Errors)
+			return
+		default:
+		}
+
+		// Get new events
+		kevents, err := read(w.kq, eventBuffer, &keventWaitTime)
+		// EINTR is okay, the syscall was interrupted before timeout expired.
+		if err != nil && err != unix.EINTR {
+			w.Errors <- err
+			continue
+		}
+
+		// Flush the events we received to the Events channel
+		for len(kevents) > 0 {
+			kevent := &kevents[0]
+			watchfd := int(kevent.Ident)
+			mask := uint32(kevent.Fflags)
+			w.mu.Lock()
+			path := w.paths[watchfd]
+			w.mu.Unlock()
+			event := newEvent(path.name, mask)
+
+			if path.isDir && !(event.Op&Remove == Remove) {
+				// Double check to make sure the directory exists. This can happen when
+				// we do a rm -fr on recursively watched folders and we receive the
+				// modification event first, but the folder has been deleted before we
+				// receive the delete event
+				if _, err := os.Lstat(event.Name); os.IsNotExist(err) {
+					// mark it as a delete event
+					event.Op |= Remove
+				}
+			}
+
+			if event.Op&Rename == Rename || event.Op&Remove == Remove {
+				w.Remove(event.Name)
+				w.mu.Lock()
+				delete(w.fileExists, event.Name)
+				w.mu.Unlock()
+			}
+
+			if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) {
+				w.sendDirectoryChangeEvents(event.Name)
+			} else {
+				// Send the event on the Events channel
+				w.Events <- event
+			}
+
+			if event.Op&Remove == Remove {
+				// Look for a file that may have overwritten this.
+				// For example, mv f1 f2 will delete f2, then create f2.
+				if path.isDir {
+					fileDir := filepath.Clean(event.Name)
+					w.mu.Lock()
+					_, found := w.watches[fileDir]
+					w.mu.Unlock()
+					if found {
+						// make sure the directory exists before we watch for changes. When we
+						// do a recursive watch and perform rm -fr, the parent directory might
+						// have gone missing, ignore the missing directory and let the
+						// upcoming delete event remove the watch from the parent directory.
+						if _, err := os.Lstat(fileDir); err == nil {
+							w.sendDirectoryChangeEvents(fileDir)
+						}
+					}
+				} else {
+					filePath := filepath.Clean(event.Name)
+					if fileInfo, err := os.Lstat(filePath); err == nil {
+						w.sendFileCreatedEventIfNew(filePath, fileInfo)
+					}
+				}
+			}
+
+			// Move to next event
+			kevents = kevents[1:]
+		}
+	}
+}
+
+// newEvent returns a platform-independent Event based on kqueue Fflags.
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&unix.NOTE_DELETE == unix.NOTE_DELETE {
+		e.Op |= Remove
+	}
+	if mask&unix.NOTE_WRITE == unix.NOTE_WRITE {
+		e.Op |= Write
+	}
+	if mask&unix.NOTE_RENAME == unix.NOTE_RENAME {
+		e.Op |= Rename
+	}
+	if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+func newCreateEvent(name string) Event {
+	return Event{Name: name, Op: Create}
+}
+
+// watchDirectoryFiles to mimic inotify when adding a watch on a directory
+func (w *Watcher) watchDirectoryFiles(dirPath string) error {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		return err
+	}
+
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		filePath, err = w.internalWatch(filePath, fileInfo)
+		if err != nil {
+			return err
+		}
+
+		w.mu.Lock()
+		w.fileExists[filePath] = true
+		w.mu.Unlock()
+	}
+
+	return nil
+}
+
+// sendDirectoryChangeEvents searches the directory for newly created files
+// and sends them over the event channel. This functionality is to have
+// the BSD version of fsnotify match Linux inotify which provides a
+// create event for files created in a watched directory.
+func (w *Watcher) sendDirectoryChangeEvents(dirPath string) {
+	// Get all files
+	files, err := ioutil.ReadDir(dirPath)
+	if err != nil {
+		w.Errors <- err
+	}
+
+	// Search for new files
+	for _, fileInfo := range files {
+		filePath := filepath.Join(dirPath, fileInfo.Name())
+		err := w.sendFileCreatedEventIfNew(filePath, fileInfo)
+
+		if err != nil {
+			return
+		}
+	}
+}
+
+// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) {
+	w.mu.Lock()
+	_, doesExist := w.fileExists[filePath]
+	w.mu.Unlock()
+	if !doesExist {
+		// Send create event
+		w.Events <- newCreateEvent(filePath)
+	}
+
+	// like watchDirectoryFiles (but without doing another ReadDir)
+	filePath, err = w.internalWatch(filePath, fileInfo)
+	if err != nil {
+		return err
+	}
+
+	w.mu.Lock()
+	w.fileExists[filePath] = true
+	w.mu.Unlock()
+
+	return nil
+}
+
+func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) {
+	if fileInfo.IsDir() {
+		// mimic Linux providing delete events for subdirectories
+		// but preserve the flags used if currently watching subdirectory
+		w.mu.Lock()
+		flags := w.dirFlags[name]
+		w.mu.Unlock()
+
+		flags |= unix.NOTE_DELETE | unix.NOTE_RENAME
+		return w.addWatch(name, flags)
+	}
+
+	// watch file to mimic Linux inotify
+	return w.addWatch(name, noteAllEvents)
+}
+
+// kqueue creates a new kernel event queue and returns a descriptor.
+func kqueue() (kq int, err error) {
+	kq, err = unix.Kqueue()
+	if kq == -1 {
+		return kq, err
+	}
+	return kq, nil
+}
+
+// register events with the queue
+func register(kq int, fds []int, flags int, fflags uint32) error {
+	changes := make([]unix.Kevent_t, len(fds))
+
+	for i, fd := range fds {
+		// SetKevent converts int to the platform-specific types:
+		unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags)
+		changes[i].Fflags = fflags
+	}
+
+	// register the events
+	success, err := unix.Kevent(kq, changes, nil, nil)
+	if success == -1 {
+		return err
+	}
+	return nil
+}
+
+// read retrieves pending events, or waits until an event occurs.
+// A timeout of nil blocks indefinitely, while 0 polls the queue.
+func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) {
+	n, err := unix.Kevent(kq, nil, events, timeout)
+	if err != nil {
+		return nil, err
+	}
+	return events[0:n], nil
+}
+
+// durationToTimespec prepares a timeout value
+func durationToTimespec(d time.Duration) unix.Timespec {
+	return unix.NsecToTimespec(d.Nanoseconds())
+}
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
new file mode 100644
index 0000000000000000000000000000000000000000..7d8de14513ede1cbd55a6d4493c1eafc550e0a42
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build freebsd openbsd netbsd dragonfly
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+const openMode = unix.O_NONBLOCK | unix.O_RDONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
new file mode 100644
index 0000000000000000000000000000000000000000..9139e17161bfb3796999fb0474846d14f3b7da0f
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package fsnotify
+
+import "golang.org/x/sys/unix"
+
+// note: this constant is not defined on BSD
+const openMode = unix.O_EVTONLY
diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go
new file mode 100644
index 0000000000000000000000000000000000000000..09436f31d821728522bb5548c091f7e0f5990ba2
--- /dev/null
+++ b/vendor/github.com/fsnotify/fsnotify/windows.go
@@ -0,0 +1,561 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+package fsnotify
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"syscall"
+	"unsafe"
+)
+
+// Watcher watches a set of files, delivering events to a channel.
+type Watcher struct {
+	Events   chan Event
+	Errors   chan error
+	isClosed bool           // Set to true when Close() is first called
+	mu       sync.Mutex     // Map access
+	port     syscall.Handle // Handle to completion port
+	watches  watchMap       // Map of watches (key: i-number)
+	input    chan *input    // Inputs to the reader are sent on this channel
+	quit     chan chan<- error
+}
+
+// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events.
+func NewWatcher() (*Watcher, error) {
+	port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateIoCompletionPort", e)
+	}
+	w := &Watcher{
+		port:    port,
+		watches: make(watchMap),
+		input:   make(chan *input, 1),
+		Events:  make(chan Event, 50),
+		Errors:  make(chan error),
+		quit:    make(chan chan<- error, 1),
+	}
+	go w.readEvents()
+	return w, nil
+}
+
+// Close removes all watches and closes the events channel.
+func (w *Watcher) Close() error {
+	if w.isClosed {
+		return nil
+	}
+	w.isClosed = true
+
+	// Send "quit" message to the reader goroutine
+	ch := make(chan error)
+	w.quit <- ch
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-ch
+}
+
+// Add starts watching the named file or directory (non-recursively).
+func (w *Watcher) Add(name string) error {
+	if w.isClosed {
+		return errors.New("watcher already closed")
+	}
+	in := &input{
+		op:    opAddWatch,
+		path:  filepath.Clean(name),
+		flags: sysFSALLEVENTS,
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+// Remove stops watching the named file or directory (non-recursively).
+func (w *Watcher) Remove(name string) error {
+	in := &input{
+		op:    opRemoveWatch,
+		path:  filepath.Clean(name),
+		reply: make(chan error),
+	}
+	w.input <- in
+	if err := w.wakeupReader(); err != nil {
+		return err
+	}
+	return <-in.reply
+}
+
+const (
+	// Options for AddWatch
+	sysFSONESHOT = 0x80000000
+	sysFSONLYDIR = 0x1000000
+
+	// Events
+	sysFSACCESS     = 0x1
+	sysFSALLEVENTS  = 0xfff
+	sysFSATTRIB     = 0x4
+	sysFSCLOSE      = 0x18
+	sysFSCREATE     = 0x100
+	sysFSDELETE     = 0x200
+	sysFSDELETESELF = 0x400
+	sysFSMODIFY     = 0x2
+	sysFSMOVE       = 0xc0
+	sysFSMOVEDFROM  = 0x40
+	sysFSMOVEDTO    = 0x80
+	sysFSMOVESELF   = 0x800
+
+	// Special events
+	sysFSIGNORED   = 0x8000
+	sysFSQOVERFLOW = 0x4000
+)
+
+func newEvent(name string, mask uint32) Event {
+	e := Event{Name: name}
+	if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO {
+		e.Op |= Create
+	}
+	if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF {
+		e.Op |= Remove
+	}
+	if mask&sysFSMODIFY == sysFSMODIFY {
+		e.Op |= Write
+	}
+	if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM {
+		e.Op |= Rename
+	}
+	if mask&sysFSATTRIB == sysFSATTRIB {
+		e.Op |= Chmod
+	}
+	return e
+}
+
+const (
+	opAddWatch = iota
+	opRemoveWatch
+)
+
+const (
+	provisional uint64 = 1 << (32 + iota)
+)
+
+type input struct {
+	op    int
+	path  string
+	flags uint32
+	reply chan error
+}
+
+type inode struct {
+	handle syscall.Handle
+	volume uint32
+	index  uint64
+}
+
+type watch struct {
+	ov     syscall.Overlapped
+	ino    *inode            // i-number
+	path   string            // Directory path
+	mask   uint64            // Directory itself is being watched with these notify flags
+	names  map[string]uint64 // Map of names being watched and their notify flags
+	rename string            // Remembers the old name while renaming a file
+	buf    [4096]byte
+}
+
+type indexMap map[uint64]*watch
+type watchMap map[uint32]indexMap
+
+func (w *Watcher) wakeupReader() error {
+	e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil)
+	if e != nil {
+		return os.NewSyscallError("PostQueuedCompletionStatus", e)
+	}
+	return nil
+}
+
+func getDir(pathname string) (dir string, err error) {
+	attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname))
+	if e != nil {
+		return "", os.NewSyscallError("GetFileAttributes", e)
+	}
+	if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 {
+		dir = pathname
+	} else {
+		dir, _ = filepath.Split(pathname)
+		dir = filepath.Clean(dir)
+	}
+	return
+}
+
+func getIno(path string) (ino *inode, err error) {
+	h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path),
+		syscall.FILE_LIST_DIRECTORY,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil, syscall.OPEN_EXISTING,
+		syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0)
+	if e != nil {
+		return nil, os.NewSyscallError("CreateFile", e)
+	}
+	var fi syscall.ByHandleFileInformation
+	if e = syscall.GetFileInformationByHandle(h, &fi); e != nil {
+		syscall.CloseHandle(h)
+		return nil, os.NewSyscallError("GetFileInformationByHandle", e)
+	}
+	ino = &inode{
+		handle: h,
+		volume: fi.VolumeSerialNumber,
+		index:  uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow),
+	}
+	return ino, nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) get(ino *inode) *watch {
+	if i := m[ino.volume]; i != nil {
+		return i[ino.index]
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (m watchMap) set(ino *inode, watch *watch) {
+	i := m[ino.volume]
+	if i == nil {
+		i = make(indexMap)
+		m[ino.volume] = i
+	}
+	i[ino.index] = watch
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) addWatch(pathname string, flags uint64) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	if flags&sysFSONLYDIR != 0 && pathname != dir {
+		return nil
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watchEntry := w.watches.get(ino)
+	w.mu.Unlock()
+	if watchEntry == nil {
+		if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil {
+			syscall.CloseHandle(ino.handle)
+			return os.NewSyscallError("CreateIoCompletionPort", e)
+		}
+		watchEntry = &watch{
+			ino:   ino,
+			path:  dir,
+			names: make(map[string]uint64),
+		}
+		w.mu.Lock()
+		w.watches.set(ino, watchEntry)
+		w.mu.Unlock()
+		flags |= provisional
+	} else {
+		syscall.CloseHandle(ino.handle)
+	}
+	if pathname == dir {
+		watchEntry.mask |= flags
+	} else {
+		watchEntry.names[filepath.Base(pathname)] |= flags
+	}
+	if err = w.startRead(watchEntry); err != nil {
+		return err
+	}
+	if pathname == dir {
+		watchEntry.mask &= ^provisional
+	} else {
+		watchEntry.names[filepath.Base(pathname)] &= ^provisional
+	}
+	return nil
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) remWatch(pathname string) error {
+	dir, err := getDir(pathname)
+	if err != nil {
+		return err
+	}
+	ino, err := getIno(dir)
+	if err != nil {
+		return err
+	}
+	w.mu.Lock()
+	watch := w.watches.get(ino)
+	w.mu.Unlock()
+	if watch == nil {
+		return fmt.Errorf("can't remove non-existent watch for: %s", pathname)
+	}
+	if pathname == dir {
+		w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		watch.mask = 0
+	} else {
+		name := filepath.Base(pathname)
+		w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED)
+		delete(watch.names, name)
+	}
+	return w.startRead(watch)
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) deleteWatch(watch *watch) {
+	for name, mask := range watch.names {
+		if mask&provisional == 0 {
+			w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED)
+		}
+		delete(watch.names, name)
+	}
+	if watch.mask != 0 {
+		if watch.mask&provisional == 0 {
+			w.sendEvent(watch.path, watch.mask&sysFSIGNORED)
+		}
+		watch.mask = 0
+	}
+}
+
+// Must run within the I/O thread.
+func (w *Watcher) startRead(watch *watch) error {
+	if e := syscall.CancelIo(watch.ino.handle); e != nil {
+		w.Errors <- os.NewSyscallError("CancelIo", e)
+		w.deleteWatch(watch)
+	}
+	mask := toWindowsFlags(watch.mask)
+	for _, m := range watch.names {
+		mask |= toWindowsFlags(m)
+	}
+	if mask == 0 {
+		if e := syscall.CloseHandle(watch.ino.handle); e != nil {
+			w.Errors <- os.NewSyscallError("CloseHandle", e)
+		}
+		w.mu.Lock()
+		delete(w.watches[watch.ino.volume], watch.ino.index)
+		w.mu.Unlock()
+		return nil
+	}
+	e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0],
+		uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0)
+	if e != nil {
+		err := os.NewSyscallError("ReadDirectoryChanges", e)
+		if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 {
+			// Watched directory was probably removed
+			if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			err = nil
+		}
+		w.deleteWatch(watch)
+		w.startRead(watch)
+		return err
+	}
+	return nil
+}
+
+// readEvents reads from the I/O completion port, converts the
+// received events into Event objects and sends them via the Events channel.
+// Entry point to the I/O thread.
+func (w *Watcher) readEvents() {
+	var (
+		n, key uint32
+		ov     *syscall.Overlapped
+	)
+	runtime.LockOSThread()
+
+	for {
+		e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE)
+		watch := (*watch)(unsafe.Pointer(ov))
+
+		if watch == nil {
+			select {
+			case ch := <-w.quit:
+				w.mu.Lock()
+				var indexes []indexMap
+				for _, index := range w.watches {
+					indexes = append(indexes, index)
+				}
+				w.mu.Unlock()
+				for _, index := range indexes {
+					for _, watch := range index {
+						w.deleteWatch(watch)
+						w.startRead(watch)
+					}
+				}
+				var err error
+				if e := syscall.CloseHandle(w.port); e != nil {
+					err = os.NewSyscallError("CloseHandle", e)
+				}
+				close(w.Events)
+				close(w.Errors)
+				ch <- err
+				return
+			case in := <-w.input:
+				switch in.op {
+				case opAddWatch:
+					in.reply <- w.addWatch(in.path, uint64(in.flags))
+				case opRemoveWatch:
+					in.reply <- w.remWatch(in.path)
+				}
+			default:
+			}
+			continue
+		}
+
+		switch e {
+		case syscall.ERROR_MORE_DATA:
+			if watch == nil {
+				w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer")
+			} else {
+				// The i/o succeeded but the buffer is full.
+				// In theory we should be building up a full packet.
+				// In practice we can get away with just carrying on.
+				n = uint32(unsafe.Sizeof(watch.buf))
+			}
+		case syscall.ERROR_ACCESS_DENIED:
+			// Watched directory was probably removed
+			w.sendEvent(watch.path, watch.mask&sysFSDELETESELF)
+			w.deleteWatch(watch)
+			w.startRead(watch)
+			continue
+		case syscall.ERROR_OPERATION_ABORTED:
+			// CancelIo was called on this handle
+			continue
+		default:
+			w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e)
+			continue
+		case nil:
+		}
+
+		var offset uint32
+		for {
+			if n == 0 {
+				w.Events <- newEvent("", sysFSQOVERFLOW)
+				w.Errors <- errors.New("short read in readEvents()")
+				break
+			}
+
+			// Point "raw" to the event in the buffer
+			raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset]))
+			buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName))
+			name := syscall.UTF16ToString(buf[:raw.FileNameLength/2])
+			fullname := filepath.Join(watch.path, name)
+
+			var mask uint64
+			switch raw.Action {
+			case syscall.FILE_ACTION_REMOVED:
+				mask = sysFSDELETESELF
+			case syscall.FILE_ACTION_MODIFIED:
+				mask = sysFSMODIFY
+			case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+				watch.rename = name
+			case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+				if watch.names[watch.rename] != 0 {
+					watch.names[name] |= watch.names[watch.rename]
+					delete(watch.names, watch.rename)
+					mask = sysFSMOVESELF
+				}
+			}
+
+			sendNameEvent := func() {
+				if w.sendEvent(fullname, watch.names[name]&mask) {
+					if watch.names[name]&sysFSONESHOT != 0 {
+						delete(watch.names, name)
+					}
+				}
+			}
+			if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				sendNameEvent()
+			}
+			if raw.Action == syscall.FILE_ACTION_REMOVED {
+				w.sendEvent(fullname, watch.names[name]&sysFSIGNORED)
+				delete(watch.names, name)
+			}
+			if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) {
+				if watch.mask&sysFSONESHOT != 0 {
+					watch.mask = 0
+				}
+			}
+			if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME {
+				fullname = filepath.Join(watch.path, watch.rename)
+				sendNameEvent()
+			}
+
+			// Move to the next event in the buffer
+			if raw.NextEntryOffset == 0 {
+				break
+			}
+			offset += raw.NextEntryOffset
+
+			// Error!
+			if offset >= n {
+				w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.")
+				break
+			}
+		}
+
+		if err := w.startRead(watch); err != nil {
+			w.Errors <- err
+		}
+	}
+}
+
+func (w *Watcher) sendEvent(name string, mask uint64) bool {
+	if mask == 0 {
+		return false
+	}
+	event := newEvent(name, uint32(mask))
+	select {
+	case ch := <-w.quit:
+		w.quit <- ch
+	case w.Events <- event:
+	}
+	return true
+}
+
+func toWindowsFlags(mask uint64) uint32 {
+	var m uint32
+	if mask&sysFSACCESS != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS
+	}
+	if mask&sysFSMODIFY != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE
+	}
+	if mask&sysFSATTRIB != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES
+	}
+	if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 {
+		m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME
+	}
+	return m
+}
+
+func toFSnotifyFlags(action uint32) uint64 {
+	switch action {
+	case syscall.FILE_ACTION_ADDED:
+		return sysFSCREATE
+	case syscall.FILE_ACTION_REMOVED:
+		return sysFSDELETE
+	case syscall.FILE_ACTION_MODIFIED:
+		return sysFSMODIFY
+	case syscall.FILE_ACTION_RENAMED_OLD_NAME:
+		return sysFSMOVEDFROM
+	case syscall.FILE_ACTION_RENAMED_NEW_NAME:
+		return sysFSMOVEDTO
+	}
+	return 0
+}
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c33dcc7c928c646b497b74de395fb53916a7be25
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..84fd743f5cc28a747e8004be2adad093e5ca65cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+	go fmt ./...
+
+test: generate
+	go get -t ./...
+	go test $(TEST) $(TESTARGS)
+
+generate:
+	go generate ./...
+
+updatedeps:
+	go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c8223326ddc47958b8b4ccf1a5648d9c07aad8bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question when people first see HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON fits a nice balance in this, but is fairly verbose and most
+importantly doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and they also force
+people to learn at least some Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be a string, number, boolean,
+    object, or list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line, and end
+    with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
+    Any text may be used in place of `EOF`. Example:
+```
+<<FOO
+hello
+world
+FOO
+```
+
+  * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as hexadecimal. If it is prefixed with 0, it is
+    treated as octal. Numbers can be in scientific notation: "1e10".
+
+  * Boolean values: `true`, `false`
+
+  * Arrays can be made by wrapping values in `[]`. Example:
+    `["foo", "bar", 42]`. Arrays can contain primitives,
+    other arrays, and objects. As an alternative, lists
+    of objects can be created with repeated blocks, using
+    this structure:
+
+    ```hcl
+    service {
+        key = "value"
+    }
+
+    service {
+        key = "value"
+    }
+    ```
+
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+    description = "the AMI to use"
+}
+```
+This would be equivalent to the following JSON:
+``` json
+{
+  "variable": {
+      "ami": {
+          "description": "the AMI to use"
+        }
+    }
+}
+```
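+
+As a rough sketch of the Go API (the `Config` struct and its tags are
+illustrative, not part of HCL itself), this HCL could be decoded with
+`hcl.Decode`:
+
+``` go
+type Config struct {
+	Variable map[string]struct {
+		Description string `hcl:"description"`
+	} `hcl:"variable"`
+}
+
+var c Config
+err := hcl.Decode(&c, `variable "ami" { description = "the AMI to use" }`)
+// c.Variable["ami"].Description == "the AMI to use"
+```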
+
+## Thanks
+
+Thanks to:
+
+  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+    and syntax that HCL was based off of.
+
+  * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+    in pure Go (no goyacc) and support for a printer.
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4db0b7112728269c3cfcc877920582b8f630c7bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -0,0 +1,19 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hcl
+environment:
+  GOPATH: c:\gopath
+init:
+  - git config --global core.autocrlf false
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 0000000000000000000000000000000000000000..bed9ebbe141e38e021463bf5531e77eb0fd8806e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct tag key used to configure HCL decoding of structs.
+const tagName = "hcl"
+
+var (
+	// nodeType holds a reference to the type of ast.Node
+	nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+	root, err := parse(bs)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+	obj, err := Parse(in)
+	if err != nil {
+		return err
+	}
+
+	return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+	val := reflect.ValueOf(out)
+	if val.Kind() != reflect.Ptr {
+		return errors.New("result must be a pointer")
+	}
+
+	// If we have the file, we actually decode the root node
+	if f, ok := n.(*ast.File); ok {
+		n = f.Node
+	}
+
+	var d decoder
+	return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+	stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+	k := result
+
+	// If we have an interface with a valid value, we use that
+	// for the check.
+	if result.Kind() == reflect.Interface {
+		elem := result.Elem()
+		if elem.IsValid() {
+			k = elem
+		}
+	}
+
+	// Push current onto stack unless it is an interface.
+	if k.Kind() != reflect.Interface {
+		d.stack = append(d.stack, k.Kind())
+
+		// Schedule a pop
+		defer func() {
+			d.stack = d.stack[:len(d.stack)-1]
+		}()
+	}
+
+	switch k.Kind() {
+	case reflect.Bool:
+		return d.decodeBool(name, node, result)
+	case reflect.Float32, reflect.Float64:
+		return d.decodeFloat(name, node, result)
+	case reflect.Int, reflect.Int32, reflect.Int64:
+		return d.decodeInt(name, node, result)
+	case reflect.Interface:
+		// When we see an interface, we make our own thing
+		return d.decodeInterface(name, node, result)
+	case reflect.Map:
+		return d.decodeMap(name, node, result)
+	case reflect.Ptr:
+		return d.decodePtr(name, node, result)
+	case reflect.Slice:
+		return d.decodeSlice(name, node, result)
+	case reflect.String:
+		return d.decodeString(name, node, result)
+	case reflect.Struct:
+		return d.decodeStruct(name, node, result)
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+		}
+	}
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.BOOL {
+			v, err := strconv.ParseBool(n.Token.Text)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+			v, err := strconv.ParseFloat(n.Token.Text, 64)
+			if err != nil {
+				return err
+			}
+
+			result.Set(reflect.ValueOf(v).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		case token.STRING:
+			v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+			if err != nil {
+				return err
+			}
+
+			if result.Kind() == reflect.Interface {
+				result.Set(reflect.ValueOf(int(v)))
+			} else {
+				result.SetInt(v)
+			}
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type %T", name, node),
+	}
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+	// When we see an ast.Node, we retain the value to enable deferred decoding.
+	// Very useful in situations where we want to preserve ast.Node information
+	// like Pos
+	if result.Type() == nodeType && result.CanSet() {
+		result.Set(reflect.ValueOf(node))
+		return nil
+	}
+
+	var set reflect.Value
+	redecode := true
+
+	// For testing types, ObjectType should just be treated as a list. We
+	// set this to a temporary var because we want to pass in the real node.
+	testNode := node
+	if ot, ok := node.(*ast.ObjectType); ok {
+		testNode = ot.List
+	}
+
+	switch n := testNode.(type) {
+	case *ast.ObjectList:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+			set = result
+		}
+	case *ast.ObjectType:
+		// If we're at the root or we're directly within a slice, then we
+		// decode objects into map[string]interface{}, otherwise we decode
+		// them into lists.
+		if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+			var temp map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeMap(
+				reflect.MapOf(
+					reflect.TypeOf(""),
+					tempVal.Type().Elem()))
+
+			set = result
+		} else {
+			var temp []map[string]interface{}
+			tempVal := reflect.ValueOf(temp)
+			result := reflect.MakeSlice(
+				reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+			set = result
+		}
+	case *ast.ListType:
+		var temp []interface{}
+		tempVal := reflect.ValueOf(temp)
+		result := reflect.MakeSlice(
+			reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+		set = result
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.BOOL:
+			var result bool
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.FLOAT:
+			var result float64
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.NUMBER:
+			var result int
+			set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+		case token.STRING, token.HEREDOC:
+			set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+		default:
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+			}
+		}
+	default:
+		return fmt.Errorf(
+			"%s: cannot decode into interface: %T",
+			name, node)
+	}
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+	result.Set(set)
+
+	if redecode {
+		// Revisit the node so that we can use the newly instantiated
+		// thing and populate it.
+		if err := d.decode(name, node, result); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+	if item, ok := node.(*ast.ObjectItem); ok {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	n, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+		}
+	}
+
+	// If we have an interface, then we can address the interface,
+	// but not the map itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	resultKeyType := resultType.Key()
+	if resultKeyType.Kind() != reflect.String {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: map must have string keys", name),
+		}
+	}
+
+	// Make a map if it is nil
+	resultMap := result
+	if result.IsNil() {
+		resultMap = reflect.MakeMap(
+			reflect.MapOf(resultKeyType, resultElemType))
+	}
+
+	// Go through each element and decode it.
+	done := make(map[string]struct{})
+	for _, item := range n.Items {
+		if item.Val == nil {
+			continue
+		}
+
+		// github.com/hashicorp/terraform/issue/5740
+		if len(item.Keys) == 0 {
+			return &parser.PosError{
+				Pos: node.Pos(),
+				Err: fmt.Errorf("%s: map must have string keys", name),
+			}
+		}
+
+		// Get the key we're dealing with, which is the first item
+		keyStr := item.Keys[0].Token.Value().(string)
+
+		// If we've already processed this key, then ignore it
+		if _, ok := done[keyStr]; ok {
+			continue
+		}
+
+		// Determine the value. If we have more than one key, then we
+		// get the objectlist of only these keys.
+		itemVal := item.Val
+		if len(item.Keys) > 1 {
+			itemVal = n.Filter(keyStr)
+			done[keyStr] = struct{}{}
+		}
+
+		// Make the field name
+		fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+		// Get the key/value as reflection values
+		key := reflect.ValueOf(keyStr)
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// If we have a pre-existing value in the map, use that
+		oldVal := resultMap.MapIndex(key)
+		if oldVal.IsValid() {
+			val.Set(oldVal)
+		}
+
+		// Decode!
+		if err := d.decode(fieldName, itemVal, val); err != nil {
+			return err
+		}
+
+		// Set the value on the map
+		resultMap.SetMapIndex(key, val)
+	}
+
+	// Set the final map if we can
+	set.Set(resultMap)
+	return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	val := reflect.New(resultElemType)
+	if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+		return err
+	}
+
+	result.Set(val)
+	return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+	// If we have an interface, then we can address the interface,
+	// but not the slice itself, so get the element but set the interface
+	set := result
+	if result.Kind() == reflect.Interface {
+		result = result.Elem()
+	}
+	// Create the slice if it is nil
+	resultType := result.Type()
+	resultElemType := resultType.Elem()
+	if result.IsNil() {
+		resultSliceType := reflect.SliceOf(resultElemType)
+		result = reflect.MakeSlice(
+			resultSliceType, 0, 0)
+	}
+
+	// Figure out the items we'll be copying into the slice
+	var items []ast.Node
+	switch n := node.(type) {
+	case *ast.ObjectList:
+		items = make([]ast.Node, len(n.Items))
+		for i, item := range n.Items {
+			items[i] = item
+		}
+	case *ast.ObjectType:
+		items = []ast.Node{n}
+	case *ast.ListType:
+		items = n.List
+	default:
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("unknown slice type: %T", node),
+		}
+	}
+
+	for i, item := range items {
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+		// Decode
+		val := reflect.Indirect(reflect.New(resultElemType))
+
+		// if item is an object that was decoded from ambiguous JSON and
+		// flattened, make sure it's expanded if it needs to decode into a
+		// defined structure.
+		item := expandObject(item, val)
+
+		if err := d.decode(fieldName, item, val); err != nil {
+			return err
+		}
+
+		// Append it onto the slice
+		result = reflect.Append(result, val)
+	}
+
+	set.Set(result)
+	return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the ast to decode properly.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+	item, ok := node.(*ast.ObjectItem)
+	if !ok {
+		return node
+	}
+
+	elemType := result.Type()
+
+	// our target type must be a struct
+	switch elemType.Kind() {
+	case reflect.Ptr:
+		switch elemType.Elem().Kind() {
+		case reflect.Struct:
+			//OK
+		default:
+			return node
+		}
+	case reflect.Struct:
+		//OK
+	default:
+		return node
+	}
+
+	// A list value will have a key and field name. If it had more fields,
+	// it wouldn't have been flattened.
+	if len(item.Keys) != 2 {
+		return node
+	}
+
+	keyToken := item.Keys[0].Token
+	item.Keys = item.Keys[1:]
+
+	// we need to un-flatten the ast enough to decode
+	newNode := &ast.ObjectItem{
+		Keys: []*ast.ObjectKey{
+			&ast.ObjectKey{
+				Token: keyToken,
+			},
+		},
+		Val: &ast.ObjectType{
+			List: &ast.ObjectList{
+				Items: []*ast.ObjectItem{item},
+			},
+		},
+	}
+
+	return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+	switch n := node.(type) {
+	case *ast.LiteralType:
+		switch n.Token.Type {
+		case token.NUMBER:
+			result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+			return nil
+		case token.STRING, token.HEREDOC:
+			result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+			return nil
+		}
+	}
+
+	return &parser.PosError{
+		Pos: node.Pos(),
+		Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+	}
+}
+
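+// decodeStruct decodes node into a struct, honoring `hcl` field tags. As a
+// rough sketch of the tag options handled below (names are illustrative):
+//
+//	type Base struct {
+//		ID string `hcl:"id"`
+//	}
+//	type Service struct {
+//		Base   `hcl:",squash"`                 // embedded fields decoded inline
+//		Name   string   `hcl:",key"`           // receives the item's block key
+//		Addr   string   `hcl:"address"`        // decodes the "address" key
+//		Fields []string `hcl:",decodedFields"` // names of fields that decoded
+//	}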
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+	var item *ast.ObjectItem
+	if it, ok := node.(*ast.ObjectItem); ok {
+		item = it
+		node = it.Val
+	}
+
+	if ot, ok := node.(*ast.ObjectType); ok {
+		node = ot.List
+	}
+
+	// Handle the special case where the object itself is a literal. Previously
+	// the yacc parser would always ensure top-level elements were arrays. The new
+	// parser does not make the same guarantees, thus we need to convert any
+	// top-level literal elements into a list.
+	if _, ok := node.(*ast.LiteralType); ok && item != nil {
+		node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+	}
+
+	list, ok := node.(*ast.ObjectList)
+	if !ok {
+		return &parser.PosError{
+			Pos: node.Pos(),
+			Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+		}
+	}
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = result
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+			// Ignore fields with tag name "-"
+			if tagParts[0] == "-" {
+				continue
+			}
+
+			if fieldType.Anonymous {
+				fieldKind := fieldType.Type.Kind()
+				if fieldKind != reflect.Struct {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: unsupported type to struct: %s",
+							fieldType.Name, fieldKind),
+					}
+				}
+
+				// We have an embedded field. We "squash" the fields down
+				// if specified in the tag.
+				squash := false
+				for _, tag := range tagParts[1:] {
+					if tag == "squash" {
+						squash = true
+						break
+					}
+				}
+
+				if squash {
+					structs = append(
+						structs, result.FieldByName(fieldType.Name))
+					continue
+				}
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	usedKeys := make(map[string]struct{})
+	decodedFields := make([]string, 0, len(fields))
+	decodedFieldsVal := make([]reflect.Value, 0)
+	unusedKeysVal := make([]reflect.Value, 0)
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(tagName)
+		tagParts := strings.SplitN(tagValue, ",", 2)
+		if len(tagParts) >= 2 {
+			switch tagParts[1] {
+			case "decodedFields":
+				decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+				continue
+			case "key":
+				if item == nil {
+					return &parser.PosError{
+						Pos: node.Pos(),
+						Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+							name, fieldName),
+					}
+				}
+
+				fieldValue.SetString(item.Keys[0].Token.Value().(string))
+				continue
+			case "unusedKeys":
+				unusedKeysVal = append(unusedKeysVal, fieldValue)
+				continue
+			}
+		}
+
+		if tagParts[0] != "" {
+			fieldName = tagParts[0]
+		}
+
+		// Determine the element we'll use to decode. If it is a single
+		// match (only object with the field), then we decode it exactly.
+		// If it is a prefix match, then we decode the matches.
+		filter := list.Filter(fieldName)
+
+		prefixMatches := filter.Children()
+		matches := filter.Elem()
+		if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+			continue
+		}
+
+		// Track the used key
+		usedKeys[fieldName] = struct{}{}
+
+		// Create the field name and decode. We range over the elements
+		// because we actually want the value.
+		fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		if len(prefixMatches.Items) > 0 {
+			if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+				return err
+			}
+		}
+		for _, match := range matches.Items {
+			var decodeNode ast.Node = match.Val
+			if ot, ok := decodeNode.(*ast.ObjectType); ok {
+				decodeNode = &ast.ObjectList{Items: ot.List.Items}
+			}
+
+			if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+				return err
+			}
+		}
+
+		decodedFields = append(decodedFields, field.Name)
+	}
+
+	if len(decodedFieldsVal) > 0 {
+		// Sort it so that it is deterministic
+		sort.Strings(decodedFields)
+
+		for _, v := range decodedFieldsVal {
+			v.Set(reflect.ValueOf(decodedFields))
+		}
+	}
+
+	return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+	var nodeContainer struct {
+		Node ast.Node
+	}
+	value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+	return value.Type()
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 0000000000000000000000000000000000000000..575a20b50b5c1114f5f79ab940fbb388ee3cb0f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
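+//
+// A minimal sketch of direct decoding (the Config struct shown is
+// illustrative, not part of this package):
+//
+//	type Config struct {
+//		Port int `hcl:"port"`
+//	}
+//
+//	var c Config
+//	err := hcl.Decode(&c, `port = 8080`)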
+package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 0000000000000000000000000000000000000000..6e5ef654bb839e1df8e43c5fac5ae949119df14d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+	node()
+	Pos() token.Pos
+}
+
+func (File) node()         {}
+func (ObjectList) node()   {}
+func (ObjectKey) node()    {}
+func (ObjectItem) node()   {}
+func (Comment) node()      {}
+func (CommentGroup) node() {}
+func (ObjectType) node()   {}
+func (LiteralType) node()  {}
+func (ListType) node()     {}
+
+// File represents a single HCL file
+type File struct {
+	Node     Node            // usually a *ObjectList
+	Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+	return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+	Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+	o.Items = append(o.Items, item)
+}
+
+// Filter returns the objects whose keys have the given key list as a prefix.
+//
+// The returned list of objects contains ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
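+//
+// For example, an item with keys ["service", "web"] matches Filter("service")
+// and is returned with its keys reduced to ["web"].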
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		// If there aren't enough keys, then ignore this
+		if len(item.Keys) < len(keys) {
+			continue
+		}
+
+		match := true
+		for i, key := range item.Keys[:len(keys)] {
+			key := key.Token.Value().(string)
+			if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		// Strip off the prefix from the children
+		newItem := *item
+		newItem.Keys = newItem.Keys[len(keys):]
+		result.Add(&newItem)
+	}
+
+	return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) > 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+	var result ObjectList
+	for _, item := range o.Items {
+		if len(item.Keys) == 0 {
+			result.Add(item)
+		}
+	}
+
+	return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item
+	return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested).
+type ObjectItem struct {
+	// Keys has length one if the item is an assignment. If it's a
+	// nested object it can be longer than one. In that case "Assign" is
+	// invalid, as there are no assignments for a nested object.
+	Keys []*ObjectKey
+
+	// assign contains the position of "=", if any
+	Assign token.Pos
+
+	// Val is the item itself. It can be an object, list, number, bool or a
+	// string. If the key length is larger than one, Val can only be of type
+	// Object.
+	Val Node
+
+	LeadComment *CommentGroup // associated lead comment
+	LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+	// I'm not entirely sure what causes this, but removing this causes
+	// a test failure. We should investigate at some point.
+	if len(o.Keys) == 0 {
+		return token.Pos{}
+	}
+
+	return o.Keys[0].Pos()
+}
+
+// ObjectKey represents a key, which is either an identifier or a string.
+type ObjectKey struct {
+	Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+	return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+	Token token.Token
+
+	// comment types, only used when in a list
+	LeadComment *CommentGroup
+	LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+	return l.Token.Pos
+}
+
+// ListType represents an HCL List type
+type ListType struct {
+	Lbrack token.Pos // position of "["
+	Rbrack token.Pos // position of "]"
+	List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+	return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+	l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type
+type ObjectType struct {
+	Lbrace token.Pos   // position of "{"
+	Rbrace token.Pos   // position of "}"
+	List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+	return o.Lbrace
+}
+
+// Comment node represents a single //, # style or /* */ style comment
+type Comment struct {
+	Start token.Pos // position of / or #
+	Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+	return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+	List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+	return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba07ad42b022ebc7841f38efa8fa2a41cf69df15
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the node passed to fn.
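+//
+// A small sketch of using Walk to rewrite string literal text (the
+// transformation is illustrative):
+//
+//	Walk(file.Node, func(n Node) (Node, bool) {
+//		if lit, ok := n.(*LiteralType); ok {
+//			lit.Token.Text = strings.ToUpper(lit.Token.Text)
+//		}
+//		return n, true
+//	})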
+func Walk(node Node, fn WalkFunc) Node {
+	rewritten, ok := fn(node)
+	if !ok {
+		return rewritten
+	}
+
+	switch n := node.(type) {
+	case *File:
+		n.Node = Walk(n.Node, fn)
+	case *ObjectList:
+		for i, item := range n.Items {
+			n.Items[i] = Walk(item, fn).(*ObjectItem)
+		}
+	case *ObjectKey:
+		// nothing to do
+	case *ObjectItem:
+		for i, k := range n.Keys {
+			n.Keys[i] = Walk(k, fn).(*ObjectKey)
+		}
+
+		if n.Val != nil {
+			n.Val = Walk(n.Val, fn)
+		}
+	case *LiteralType:
+		// nothing to do
+	case *ListType:
+		for i, l := range n.List {
+			n.List[i] = Walk(l, fn)
+		}
+	case *ObjectType:
+		n.List = Walk(n.List, fn).(*ObjectList)
+	default:
+		// should we panic here?
+		fmt.Printf("unknown type: %T\n", n)
+	}
+
+	fn(nil)
+	return rewritten
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c99381dfbf1332e2cf495efc831b6a3bb74021f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+	Pos token.Pos
+	Err error
+}
+
+func (e *PosError) Error() string {
+	return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..098e1bc495556aec0bb09623c64533bda6b372a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,526 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/scanner"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	comments    []*ast.CommentGroup
+	leadComment *ast.CommentGroup // last lead comment
+	lineComment *ast.CommentGroup // last line comment
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	// normalize all line endings
+	// since the scanner and output only work with "\n" line endings, we may
+	// end up with dangling "\r" characters in the parsed data.
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+	}
+
+	f.Node, err = p.objectList(false)
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	f.Comments = p.comments
+	return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter" obj" tells this whether to we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		if obj {
+			tok := p.scan()
+			p.unscan()
+			if tok.Type == token.RBRACE {
+				break
+			}
+		}
+
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because we might want to use the
+		// already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// object lists can be optionally comma-delimited e.g. when a list of maps
+		// is being expressed, so a comma is allowed here - it's simply consumed
+		tok := p.scan()
+		if tok.Type != token.COMMA {
+			p.unscan()
+		}
+	}
+	return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+	endline = p.tok.Pos.Line
+
+	// count the endline if it's a multiline comment, i.e. starting with /*
+	if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+		// don't use range here - no need to decode Unicode code points
+		for i := 0; i < len(p.tok.Text); i++ {
+			if p.tok.Text[i] == '\n' {
+				endline++
+			}
+		}
+	}
+
+	comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+	p.tok = p.sc.Scan()
+	return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+	var list []*ast.Comment
+	endline = p.tok.Pos.Line
+
+	for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+		var comment *ast.Comment
+		comment, endline = p.consumeComment()
+		list = append(list, comment)
+	}
+
+	// add comment group to the comments list
+	comments = &ast.CommentGroup{List: list}
+	p.comments = append(p.comments, comments)
+
+	return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if len(keys) > 0 && err == errEofToken {
+		// We ignore eof token here since it is an error if we didn't
+		// receive a value (but we did receive a key) for the item.
+		err = nil
+	}
+	if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+		// This is a strange boolean statement, but what it means is:
+		// We have keys with no value, and we're likely in an object
+		// (since RBrace ends an object). For this, we set err to nil so
+		// we continue and get the error below of having the wrong value
+		// type.
+		err = nil
+
+		// Reset the token type so we don't think it completed fine. See
+		// objectType which uses p.tok.Type to check if we're done with
+		// the object.
+		p.tok.Type = token.EOF
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	if p.leadComment != nil {
+		o.LeadComment = p.leadComment
+		p.leadComment = nil
+	}
+
+	switch p.tok.Type {
+	case token.ASSIGN:
+		o.Assign = p.tok.Pos
+		o.Val, err = p.object()
+		if err != nil {
+			return nil, err
+		}
+	case token.LBRACE:
+		o.Val, err = p.objectType()
+		if err != nil {
+			return nil, err
+		}
+	default:
+		keyStr := make([]string, 0, len(keys))
+		for _, k := range keys {
+			keyStr = append(keyStr, k.Token.Text)
+		}
+
+		return nil, &PosError{
+			Pos: p.tok.Pos,
+			Err: fmt.Errorf(
+				"key '%s' expected start of object ('{') or assignment ('=')",
+				strings.Join(keyStr, " ")),
+		}
+	}
+
+	// do a look-ahead for line comment
+	p.scan()
+	if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+		o.LineComment = p.lineComment
+		p.lineComment = nil
+	}
+	p.unscan()
+	return o, nil
+}
+
+// objectKey parses an object key and returns a list of ObjectKey ASTs
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			// It is very important to also return the keys here as well as
+			// the error. This is because we need to be able to tell if we
+			// did parse keys prior to finding the EOF, or if we just found
+			// a bare EOF.
+			return keys, errEofToken
+		case token.ASSIGN:
+			// assignment or object only, but not nested objects. this is not
+			// allowed: `foo bar = {}`
+			if keyCount > 1 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+				}
+			}
+
+			if keyCount == 0 {
+				return nil, &PosError{
+					Pos: p.tok.Pos,
+					Err: errors.New("no object keys found!"),
+				}
+			}
+
+			return keys, nil
+		case token.LBRACE:
+			var err error
+
+			// If we have no keys, then it is a syntax error. i.e. {{}} is not
+			// allowed.
+			if len(keys) == 0 {
+				err = &PosError{
+					Pos: p.tok.Pos,
+					Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+				}
+			}
+
+			// object
+			return keys, err
+		case token.IDENT, token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{Token: p.tok})
+		case token.ILLEGAL:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("illegal character"),
+			}
+		default:
+			return keys, &PosError{
+				Pos: p.tok.Pos,
+				Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+			}
+		}
+	}
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.COMMENT:
+		// implement comment
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, &PosError{
+		Pos: tok.Pos,
+		Err: fmt.Errorf("Unknown token: %+v", tok),
+	}
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{
+		Lbrace: p.tok.Pos,
+	}
+
+	l, err := p.objectList(true)
+
+	// if we hit RBRACE, we are good to go (means we parsed all items); if it's
+	// not an RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	// No error, scan and expect the ending to be a brace
+	if tok := p.scan(); tok.Type != token.RBRACE {
+		return nil, &PosError{
+			Pos: tok.Pos,
+			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+		}
+	}
+
+	o.List = l
+	o.Rbrace = p.tok.Pos // advanced via parseObjectList
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{
+		Lbrack: p.tok.Pos,
+	}
+
+	needComma := false
+	for {
+		tok := p.scan()
+		if needComma {
+			switch tok.Type {
+			case token.COMMA, token.RBRACK:
+			default:
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error parsing list, expected comma or list end, got: %s",
+						tok.Type),
+				}
+			}
+		}
+		switch tok.Type {
+		case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			// If there is a lead comment, apply it
+			if p.leadComment != nil {
+				node.LeadComment = p.leadComment
+				p.leadComment = nil
+			}
+
+			l.Add(node)
+			needComma = true
+		case token.COMMA:
+			// get next list item or we are at the end
+			// do a look-ahead for line comment
+			p.scan()
+			if p.lineComment != nil && len(l.List) > 0 {
+				lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+				if ok {
+					lit.LineComment = p.lineComment
+					l.List[len(l.List)-1] = lit
+					p.lineComment = nil
+				}
+			}
+			p.unscan()
+
+			needComma = false
+			continue
+		case token.LBRACE:
+			// Looks like a nested object, so parse it out
+			node, err := p.objectType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse object within list: %s", err),
+				}
+			}
+			l.Add(node)
+			needComma = true
+		case token.LBRACK:
+			node, err := p.listType()
+			if err != nil {
+				return nil, &PosError{
+					Pos: tok.Pos,
+					Err: fmt.Errorf(
+						"error while trying to parse list within list: %s", err),
+				}
+			}
+			l.Add(node)
+		case token.RBRACK:
+			// finished
+			l.Rbrack = p.tok.Pos
+			return l, nil
+		default:
+			return nil, &PosError{
+				Pos: tok.Pos,
+				Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+			}
+		}
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok,
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	// Otherwise read the next token from the scanner and save it to the buffer
+	// in case we unscan later.
+	prev := p.tok
+	p.tok = p.sc.Scan()
+
+	if p.tok.Type == token.COMMENT {
+		var comment *ast.CommentGroup
+		var endline int
+
+		// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+		// p.tok.Pos.Line, prev.Pos.Line, endline)
+		if p.tok.Pos.Line == prev.Pos.Line {
+			// The comment is on same line as the previous token; it
+			// cannot be a lead comment but may be a line comment.
+			comment, endline = p.consumeCommentGroup(0)
+			if p.tok.Pos.Line != endline {
+				// The next token is on a different line, thus
+				// the last comment group is a line comment.
+				p.lineComment = comment
+			}
+		}
+
+		// consume successor comments, if any
+		endline = -1
+		for p.tok.Type == token.COMMENT {
+			comment, endline = p.consumeCommentGroup(1)
+		}
+
+		if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+			switch p.tok.Type {
+			case token.RBRACE, token.RBRACK:
+				// Do not count for these cases
+			default:
+				// The next token is following on the line immediately after the
+				// comment group, thus the last comment group is a lead comment.
+				p.leadComment = comment
+			}
+		}
+
+	}
+
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 0000000000000000000000000000000000000000..6601ef76e6ca6a1a4c2a8b696013f4992445a2ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,651 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"regexp"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner. If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
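+//
+// A typical scanning loop (sketch):
+//
+//	s := scanner.New(src)
+//	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+//		// process tok
+//	}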
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader compatible type
+	// (*bytes.Buffer). So in the future we might easily change it to streaming
+	// read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// srcPos.Line always starts at 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. Returns the rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// If we see a null character with data left, then that is an error
+	if ch == '\x00' && s.buf.Len() > 0 {
+		s.err("unexpected null character (0x00)")
+		return eof
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // calling unread before a successful read is a caller bug
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() advances the offset by the size of
+	// the rune, but we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		tok = token.IDENT
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '#', '/':
+			tok = token.COMMENT
+			s.scanComment(ch)
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '<':
+			tok = token.HEREDOC
+			s.scanHeredoc()
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case '=':
+			tok = token.ASSIGN
+		case '+':
+			tok = token.ADD
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				tok = token.SUB
+			}
+		default:
+			s.err("illegal char")
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+func (s *Scanner) scanComment(ch rune) {
+	// single line comments
+	if ch == '#' || (ch == '/' && s.peek() != '*') {
+		if ch == '/' && s.peek() != '/' {
+			s.err("expected '/' for comment")
+			return
+		}
+
+		ch = s.next()
+		for ch != '\n' && ch >= 0 && ch != eof {
+			ch = s.next()
+		}
+		if ch != eof && ch >= 0 {
+			s.unread()
+		}
+		return
+	}
+
+	// be sure we get the character after /*. This allows us to find comments
+	// that are not terminated
+	if ch == '/' {
+		s.next()
+		ch = s.next() // read character after "/*"
+	}
+
+	// look for /* - style comments
+	for {
+		if ch < 0 || ch == eof {
+			s.err("comment not terminated")
+			break
+		}
+
+		ch0 := ch
+		ch = s.next()
+		if ch0 == '*' && ch == '/' {
+			break
+		}
+	}
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	if ch == '0' {
+		// check for hexadecimal, octal or float
+		ch = s.next()
+		if ch == 'x' || ch == 'X' {
+			// hexadecimal
+			ch = s.next()
+			found := false
+			for isHexadecimal(ch) {
+				ch = s.next()
+				found = true
+			}
+
+			if !found {
+				s.err("illegal hexadecimal number")
+			}
+
+			if ch != eof {
+				s.unread()
+			}
+
+			return token.NUMBER
+		}
+
+		// now it's either something like: 0421(octal) or 0.1231(float)
+		illegalOctal := false
+		for isDecimal(ch) {
+			ch = s.next()
+			if ch == '8' || ch == '9' {
+				// this is just a possibility. For example 0159 is illegal, but
+				// 0159.23 is valid. So we mark a possible illegal octal. If
+				// the next character is not a period, we'll print the error.
+				illegalOctal = true
+			}
+		}
+
+		if ch == 'e' || ch == 'E' {
+			ch = s.scanExponent(ch)
+			return token.FLOAT
+		}
+
+		if ch == '.' {
+			ch = s.scanFraction(ch)
+
+			if ch == 'e' || ch == 'E' {
+				ch = s.next()
+				ch = s.scanExponent(ch)
+			}
+			return token.FLOAT
+		}
+
+		if illegalOctal {
+			s.err("illegal octal number")
+		}
+
+		if ch != eof {
+			s.unread()
+		}
+		return token.NUMBER
+	}
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanHeredoc scans a heredoc string.
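+// The opening anchor has the form <<ANCHOR (or <<-ANCHOR for the indented
+// variant); the content is terminated by a line containing only the anchor,
+// optionally preceded by whitespace.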
+func (s *Scanner) scanHeredoc() {
+	// Scan the second '<' in example: '<<EOF'
+	if s.next() != '<' {
+		s.err("heredoc expected second '<', didn't see it")
+		return
+	}
+
+	// Get the original offset so we can read just the heredoc ident
+	offs := s.srcPos.Offset
+
+	// Scan the identifier
+	ch := s.next()
+
+	// Indented heredoc syntax
+	if ch == '-' {
+		ch = s.next()
+	}
+
+	for isLetter(ch) || isDigit(ch) {
+		ch = s.next()
+	}
+
+	// If we reached an EOF then that is not good
+	if ch == eof {
+		s.err("heredoc not terminated")
+		return
+	}
+
+	// Ignore the '\r' in Windows line endings
+	if ch == '\r' {
+		if s.peek() == '\n' {
+			ch = s.next()
+		}
+	}
+
+	// If we didn't reach a newline then that is also not good
+	if ch != '\n' {
+		s.err("invalid characters in heredoc anchor")
+		return
+	}
+
+	// Read the identifier
+	identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+	if len(identBytes) == 0 {
+		s.err("zero-length heredoc anchor")
+		return
+	}
+
+	var identRegexp *regexp.Regexp
+	if identBytes[0] == '-' {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+	} else {
+		identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+	}
+
+	// Read the actual string value
+	lineStart := s.srcPos.Offset
+	for {
+		ch := s.next()
+
+		// Special newline handling.
+		if ch == '\n' {
+			// Math is fast, so we first compare the byte counts to see if we have a chance
+			// of seeing the same identifier - if the length is less than the number of bytes
+			// in the identifier, this cannot be a valid terminator.
+			lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+			if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+				break
+			}
+
+			// Not an anchor match, record the start of a new line
+			lineStart = s.srcPos.Offset
+		}
+
+		if ch == eof {
+			s.err("heredoc not terminated")
+			return
+		}
+	}
+
+	return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' && braces == 0 {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans up to n digits of the given base. For example, the octal
+// escape \123 would result in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	start := n
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		if ch == eof {
+			// If we see an EOF, we halt any more scanning of digits
+			// immediately.
+			break
+		}
+
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	if n != start {
+		// we scanned all digits, put the last non-digit char back,
+		// only if we read anything at all
+		s.unread()
+	}
+
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 0000000000000000000000000000000000000000..5f981eaa2f0f68875d300b9aec553ad8cca3a945
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+	"errors"
+	"unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a double-quoted HCL string literal, returning the
+// string value that s quotes.
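+//
+// Note that, unlike the standard library's strconv.Unquote, interpolation
+// sequences such as ${var.foo} are passed through verbatim: characters inside
+// ${} are never unquoted. For example, Unquote(`"a ${var.b} c"`) returns
+// `a ${var.b} c`.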
+func Unquote(s string) (t string, err error) {
+	n := len(s)
+	if n < 2 {
+		return "", ErrSyntax
+	}
+	quote := s[0]
+	if quote != s[n-1] {
+		return "", ErrSyntax
+	}
+	s = s[1 : n-1]
+
+	if quote != '"' {
+		return "", ErrSyntax
+	}
+	if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+		return "", ErrSyntax
+	}
+
+	// Is it trivial?  Avoid allocation.
+	if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+		switch quote {
+		case '"':
+			return s, nil
+		case '\'':
+			r, size := utf8.DecodeRuneInString(s)
+			if size == len(s) && (r != utf8.RuneError || size != 1) {
+				return s, nil
+			}
+		}
+	}
+
+	var runeTmp [utf8.UTFMax]byte
+	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+	for len(s) > 0 {
+		// If we're starting a '${}' then let it through un-unquoted.
+		// Specifically: we don't unquote any characters within the `${}`
+		// section.
+		if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+			buf = append(buf, '$', '{')
+			s = s[2:]
+
+			// Continue reading until we find the closing brace, copying as-is
+			braces := 1
+			for len(s) > 0 && braces > 0 {
+				r, size := utf8.DecodeRuneInString(s)
+				if r == utf8.RuneError {
+					return "", ErrSyntax
+				}
+
+				s = s[size:]
+
+				n := utf8.EncodeRune(runeTmp[:], r)
+				buf = append(buf, runeTmp[:n]...)
+
+				switch r {
+				case '{':
+					braces++
+				case '}':
+					braces--
+				}
+			}
+			if braces != 0 {
+				return "", ErrSyntax
+			}
+			if len(s) == 0 {
+				// If there's no string left, we're done!
+				break
+			} else {
+				// If there's more left, we need to pop back up to the top of the loop
+				// in case there's another interpolation in this string.
+				continue
+			}
+		}
+
+		if s[0] == '\n' {
+			return "", ErrSyntax
+		}
+
+		c, multibyte, ss, err := unquoteChar(s, quote)
+		if err != nil {
+			return "", err
+		}
+		s = ss
+		if c < utf8.RuneSelf || !multibyte {
+			buf = append(buf, byte(c))
+		} else {
+			n := utf8.EncodeRune(runeTmp[:], c)
+			buf = append(buf, runeTmp[:n]...)
+		}
+		if quote == '\'' && len(s) != 0 {
+			// single-quoted must be single character
+			return "", ErrSyntax
+		}
+	}
+	return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] == c {
+			return true
+		}
+	}
+	return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+	c := rune(b)
+	switch {
+	case '0' <= c && c <= '9':
+		return c - '0', true
+	case 'a' <= c && c <= 'f':
+		return c - 'a' + 10, true
+	case 'A' <= c && c <= 'F':
+		return c - 'A' + 10, true
+	}
+	return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+	// easy cases
+	switch c := s[0]; {
+	case c == quote && (quote == '\'' || quote == '"'):
+		err = ErrSyntax
+		return
+	case c >= utf8.RuneSelf:
+		r, size := utf8.DecodeRuneInString(s)
+		return r, true, s[size:], nil
+	case c != '\\':
+		return rune(s[0]), false, s[1:], nil
+	}
+
+	// hard case: c is backslash
+	if len(s) <= 1 {
+		err = ErrSyntax
+		return
+	}
+	c := s[1]
+	s = s[2:]
+
+	switch c {
+	case 'a':
+		value = '\a'
+	case 'b':
+		value = '\b'
+	case 'f':
+		value = '\f'
+	case 'n':
+		value = '\n'
+	case 'r':
+		value = '\r'
+	case 't':
+		value = '\t'
+	case 'v':
+		value = '\v'
+	case 'x', 'u', 'U':
+		n := 0
+		switch c {
+		case 'x':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		var v rune
+		if len(s) < n {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < n; j++ {
+			x, ok := unhex(s[j])
+			if !ok {
+				err = ErrSyntax
+				return
+			}
+			v = v<<4 | x
+		}
+		s = s[n:]
+		if c == 'x' {
+			// single-byte string, possibly not UTF-8
+			value = v
+			break
+		}
+		if v > utf8.MaxRune {
+			err = ErrSyntax
+			return
+		}
+		value = v
+		multibyte = true
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		v := rune(c) - '0'
+		if len(s) < 2 {
+			err = ErrSyntax
+			return
+		}
+		for j := 0; j < 2; j++ { // one digit already; two more
+			x := rune(s[j]) - '0'
+			if x < 0 || x > 7 {
+				err = ErrSyntax
+				return
+			}
+			v = (v << 3) | x
+		}
+		s = s[2:]
+		if v > 255 {
+			err = ErrSyntax
+			return
+		}
+		value = v
+	case '\\':
+		value = '\\'
+	case '\'', '"':
+		if c != quote {
+			err = ErrSyntax
+			return
+		}
+		value = rune(c)
+	default:
+		err = ErrSyntax
+		return
+	}
+	tail = s
+	return
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 0000000000000000000000000000000000000000..59c1bb72d4a4ed0c773cc263f2e003d083b507fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 0000000000000000000000000000000000000000..e37c0664ecd3a82484f4d48c3e8f42fbad59b66d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+	JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+	COMMENT
+
+	identifier_beg
+	IDENT // literals
+	literal_beg
+	NUMBER  // 12345
+	FLOAT   // 123.45
+	BOOL    // true,false
+	STRING  // "abc"
+	HEREDOC // <<FOO\nbar\nFOO
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+
+	RBRACK // ]
+	RBRACE // }
+
+	ASSIGN // =
+	ADD    // +
+	SUB    // -
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF:     "EOF",
+	COMMENT: "COMMENT",
+
+	IDENT:  "IDENT",
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+
+	LBRACK:  "LBRACK",
+	LBRACE:  "LBRACE",
+	COMMA:   "COMMA",
+	PERIOD:  "PERIOD",
+	HEREDOC: "HEREDOC",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+
+	ASSIGN: "ASSIGN",
+	ADD:    "ADD",
+	SUB:    "SUB",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token, including its
+// position, type, and literal text.
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
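+//
+// For example, a NUMBER token with Text "42" yields int64(42), and a BOOL
+// token with Text "true" yields true.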
+func (t Token) Value() interface{} {
+	switch t.Type {
+	case BOOL:
+		if t.Text == "true" {
+			return true
+		} else if t.Text == "false" {
+			return false
+		}
+
+		panic("unknown bool value: " + t.Text)
+	case FLOAT:
+		v, err := strconv.ParseFloat(t.Text, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return float64(v)
+	case NUMBER:
+		v, err := strconv.ParseInt(t.Text, 0, 64)
+		if err != nil {
+			panic(err)
+		}
+
+		return int64(v)
+	case IDENT:
+		return t.Text
+	case HEREDOC:
+		return unindentHeredoc(t.Text)
+	case STRING:
+		// Determine the Unquote method to use. If it came from JSON,
+		// then we need to use the built-in unquote since we have to
+		// escape interpolations there.
+		f := hclstrconv.Unquote
+		if t.JSON {
+			f = strconv.Unquote
+		}
+
+		// This case occurs if JSON null is used
+		if t.Text == "" {
+			return ""
+		}
+
+		v, err := f(t.Text)
+		if err != nil {
+			panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+		}
+
+		return v
+	default:
+		panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+	}
+}
+
+// unindentHeredoc returns the string content of a HEREDOC started with <<. If
+// the HEREDOC was started with <<-, the hanging indent is removed, provided
+// every content line is at least as indented as the terminating line.
+func unindentHeredoc(heredoc string) string {
+	// We need to find the end of the marker
+	idx := strings.IndexByte(heredoc, '\n')
+	if idx == -1 {
+		panic("heredoc doesn't contain newline")
+	}
+
+	unindent := heredoc[2] == '-'
+
+	// We can optimize if the heredoc isn't marked for indentation
+	if !unindent {
+		return string(heredoc[idx+1 : len(heredoc)-idx+1])
+	}
+
+	// We need to unindent each line based on the indentation level of the marker
+	lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+	whitespacePrefix := lines[len(lines)-1]
+
+	isIndented := true
+	for _, v := range lines {
+		if strings.HasPrefix(v, whitespacePrefix) {
+			continue
+		}
+
+		isIndented = false
+		break
+	}
+
+	// If all lines are not at least as indented as the terminating mark, return the
+	// heredoc as is, but trim the leading space from the marker on the final line.
+	if !isIndented {
+		return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+	}
+
+	unindentedLines := make([]string, len(lines))
+	for k, v := range lines {
+		if k == len(lines)-1 {
+			unindentedLines[k] = ""
+			break
+		}
+
+		unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+	}
+
+	return strings.Join(unindentedLines, "\n")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644
index 0000000000000000000000000000000000000000..f652d6fe78e4e9aa9c2ae54de693e8d40a8b2780
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens nested objects
+// and lists into HCL-style object items where possible.
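+//
+// For example, the JSON {"foo": {"bar": {"baz": 1}}} is flattened so that a
+// single item carries the keys ["foo", "bar"] with the object {"baz": 1} as
+// its value, mirroring the HCL block form foo "bar" { baz = 1 }.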
+func flattenObjects(node ast.Node) {
+	ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+		// We only care about lists, because this is what we modify
+		list, ok := n.(*ast.ObjectList)
+		if !ok {
+			return n, true
+		}
+
+		// Rebuild the item list
+		items := make([]*ast.ObjectItem, 0, len(list.Items))
+		frontier := make([]*ast.ObjectItem, len(list.Items))
+		copy(frontier, list.Items)
+		for len(frontier) > 0 {
+			// Pop the current item
+			n := len(frontier)
+			item := frontier[n-1]
+			frontier = frontier[:n-1]
+
+			switch v := item.Val.(type) {
+			case *ast.ObjectType:
+				items, frontier = flattenObjectType(v, item, items, frontier)
+			case *ast.ListType:
+				items, frontier = flattenListType(v, item, items, frontier)
+			default:
+				items = append(items, item)
+			}
+		}
+
+		// Reverse the list since the frontier model runs things backwards
+		for i := len(items)/2 - 1; i >= 0; i-- {
+			opp := len(items) - 1 - i
+			items[i], items[opp] = items[opp], items[i]
+		}
+
+		// Done! Set the original items
+		list.Items = items
+		return n, true
+	})
+}
+
+func flattenListType(
+	ot *ast.ListType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list is empty, keep the original list
+	if len(ot.List) == 0 {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this list must also be objects!
+	for _, subitem := range ot.List {
+		if _, ok := subitem.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match. Go through all the items and flatten them.
+	for _, elem := range ot.List {
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        item.Keys,
+			Assign:      item.Assign,
+			Val:         elem,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}
+
+func flattenObjectType(
+	ot *ast.ObjectType,
+	item *ast.ObjectItem,
+	items []*ast.ObjectItem,
+	frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+	// If the list has no items we do not have to flatten anything
+	if ot.List.Items == nil {
+		items = append(items, item)
+		return items, frontier
+	}
+
+	// All the elements of this object must also be objects!
+	for _, subitem := range ot.List.Items {
+		if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+			items = append(items, item)
+			return items, frontier
+		}
+	}
+
+	// Great! We have a match. Go through all the items and flatten them.
+	for _, subitem := range ot.List.Items {
+		// Copy the new key
+		keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+		copy(keys, item.Keys)
+		copy(keys[len(item.Keys):], subitem.Keys)
+
+		// Add it to the frontier so that we can recurse
+		frontier = append(frontier, &ast.ObjectItem{
+			Keys:        keys,
+			Assign:      item.Assign,
+			Val:         subitem.Val,
+			LeadComment: item.LeadComment,
+			LineComment: item.LineComment,
+		})
+	}
+
+	return items, frontier
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..125a5f07298c293260c1140aeea31220ff41486d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hcltoken "github.com/hashicorp/hcl/hcl/token"
+	"github.com/hashicorp/hcl/json/scanner"
+	"github.com/hashicorp/hcl/json/token"
+)
+
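+// Parser is a recursive-descent parser that reads HCL JSON syntax and
+// produces an HCL AST.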
+type Parser struct {
+	sc *scanner.Scanner
+
+	// Last read token
+	tok       token.Token
+	commaPrev token.Token
+
+	enableTrace bool
+	indent      int
+	n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+	return &Parser{
+		sc: scanner.New(src),
+	}
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	p := newParser(src)
+	return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+	f := &ast.File{}
+	var err, scerr error
+	p.sc.Error = func(pos token.Pos, msg string) {
+		scerr = fmt.Errorf("%s: %s", pos, msg)
+	}
+
+	// The root must be an object in JSON
+	object, err := p.object()
+	if scerr != nil {
+		return nil, scerr
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	// We make our final node an object list so it is more HCL compatible
+	f.Node = object.List
+
+	// Flatten it, which finds patterns and turns them into more HCL-like
+	// AST trees.
+	flattenObjects(f.Node)
+
+	return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+	defer un(trace(p, "ParseObjectList"))
+	node := &ast.ObjectList{}
+
+	for {
+		n, err := p.objectItem()
+		if err == errEofToken {
+			break // we are finished
+		}
+
+		// we don't return a nil node, because callers might want to use the
+		// already collected items.
+		if err != nil {
+			return node, err
+		}
+
+		node.Add(n)
+
+		// Check for a followup comma. If it isn't a comma, then we're done
+		if tok := p.scan(); tok.Type != token.COMMA {
+			break
+		}
+	}
+
+	return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+	defer un(trace(p, "ParseObjectItem"))
+
+	keys, err := p.objectKey()
+	if err != nil {
+		return nil, err
+	}
+
+	o := &ast.ObjectItem{
+		Keys: keys,
+	}
+
+	switch p.tok.Type {
+	case token.COLON:
+		pos := p.tok.Pos
+		o.Assign = hcltoken.Pos{
+			Filename: pos.Filename,
+			Offset:   pos.Offset,
+			Line:     pos.Line,
+			Column:   pos.Column,
+		}
+
+		o.Val, err = p.objectValue()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+	keyCount := 0
+	keys := make([]*ast.ObjectKey, 0)
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.EOF:
+			return nil, errEofToken
+		case token.STRING:
+			keyCount++
+			keys = append(keys, &ast.ObjectKey{
+				Token: p.tok.HCLToken(),
+			})
+		case token.COLON:
+			// If we have a zero keycount it means that we never got
+			// an object key, i.e. `{ :`. This is a syntax error.
+			if keyCount == 0 {
+				return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+			}
+
+			// Done
+			return keys, nil
+		case token.ILLEGAL:
+			return nil, errors.New("illegal")
+		default:
+			return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+		}
+	}
+}
+
+// objectValue parses any type of value, such as number, bool, string, object,
+// or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+	defer un(trace(p, "ParseObjectValue"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+		return p.literalType()
+	case token.LBRACE:
+		return p.objectType()
+	case token.LBRACK:
+		return p.listType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root object; in JSON the root of the document must be an
+// object enclosed in braces.
+func (p *Parser) object() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseType"))
+	tok := p.scan()
+
+	switch tok.Type {
+	case token.LBRACE:
+		return p.objectType()
+	case token.EOF:
+		return nil, errEofToken
+	}
+
+	return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+	defer un(trace(p, "ParseObjectType"))
+
+	// we assume that the currently scanned token is a LBRACE
+	o := &ast.ObjectType{}
+
+	l, err := p.objectList()
+
+	// if we hit RBRACE, we are good to go (means we parsed all items); if it's
+	// not an RBRACE, it's a syntax error and we just return it.
+	if err != nil && p.tok.Type != token.RBRACE {
+		return nil, err
+	}
+
+	o.List = l
+	return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+	defer un(trace(p, "ParseListType"))
+
+	// we assume that the currently scanned token is a LBRACK
+	l := &ast.ListType{}
+
+	for {
+		tok := p.scan()
+		switch tok.Type {
+		case token.NUMBER, token.FLOAT, token.STRING:
+			node, err := p.literalType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.COMMA:
+			continue
+		case token.LBRACE:
+			node, err := p.objectType()
+			if err != nil {
+				return nil, err
+			}
+
+			l.Add(node)
+		case token.BOOL:
+			// TODO(arslan) should we support? not supported by HCL yet
+		case token.LBRACK:
+			// TODO(arslan) should we support nested lists? Even though it's
+			// written in README of HCL, it's not a part of the grammar
+			// (not defined in parse.y)
+		case token.RBRACK:
+			// finished
+			return l, nil
+		default:
+			return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+		}
+
+	}
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+	defer un(trace(p, "ParseLiteral"))
+
+	return &ast.LiteralType{
+		Token: p.tok.HCLToken(),
+	}, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+	// If we have a token on the buffer, then return it.
+	if p.n != 0 {
+		p.n = 0
+		return p.tok
+	}
+
+	p.tok = p.sc.Scan()
+	return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+	p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+	if !p.enableTrace {
+		return
+	}
+
+	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+	const n = len(dots)
+	fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+	i := 2 * p.indent
+	for i > n {
+		fmt.Print(dots)
+		i -= n
+	}
+	// i <= n
+	fmt.Print(dots[0:i])
+	fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+	p.printTrace(msg, "(")
+	p.indent++
+	return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+	p.indent--
+	p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe3f0f095026925817450d6feb9092a35fffb322
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+	buf *bytes.Buffer // Source buffer for advancing and scanning
+	src []byte        // Source buffer for immutable access
+
+	// Source Position
+	srcPos  token.Pos // current position
+	prevPos token.Pos // previous position, used for peek() method
+
+	lastCharLen int // length of last character in bytes
+	lastLineLen int // length of last line in characters (for correct column reporting)
+
+	tokStart int // token text start position
+	tokEnd   int // token text end  position
+
+	// Error is called for each error encountered. If no Error
+	// function is set, the error is reported to os.Stderr.
+	Error func(pos token.Pos, msg string)
+
+	// ErrorCount is incremented by one for each error encountered.
+	ErrorCount int
+
+	// tokPos is the start position of the most recently scanned token; set by
+	// Scan. The Filename field is always left untouched by the Scanner.  If
+	// an error is reported (via Error) and Position is invalid, the scanner is
+	// not inside a token.
+	tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader-compatible type
+	// (*bytes.Buffer). So in the future we might easily change it to streaming
+	// read.
+	b := bytes.NewBuffer(src)
+	s := &Scanner{
+		buf: b,
+		src: src,
+	}
+
+	// source line numbering always starts at 1
+	s.srcPos.Line = 1
+	return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0) if
+// an error occurs (or io.EOF is reached).
+func (s *Scanner) next() rune {
+	ch, size, err := s.buf.ReadRune()
+	if err != nil {
+		// advance for error reporting
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		return eof
+	}
+
+	if ch == utf8.RuneError && size == 1 {
+		s.srcPos.Column++
+		s.srcPos.Offset += size
+		s.lastCharLen = size
+		s.err("illegal UTF-8 encoding")
+		return ch
+	}
+
+	// remember last position
+	s.prevPos = s.srcPos
+
+	s.srcPos.Column++
+	s.lastCharLen = size
+	s.srcPos.Offset += size
+
+	if ch == '\n' {
+		s.srcPos.Line++
+		s.lastLineLen = s.srcPos.Column
+		s.srcPos.Column = 0
+	}
+
+	// debug
+	// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+	return ch
+}
+
+// unread unreads the previously read rune and updates the source position
+func (s *Scanner) unread() {
+	if err := s.buf.UnreadRune(); err != nil {
+		panic(err) // calling unread before a successful read is a caller bug
+	}
+	s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+	peek, _, err := s.buf.ReadRune()
+	if err != nil {
+		return eof
+	}
+
+	s.buf.UnreadRune()
+	return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+	ch := s.next()
+
+	// skip white space
+	for isWhitespace(ch) {
+		ch = s.next()
+	}
+
+	var tok token.Type
+
+	// token text markings
+	s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() advances the offset by the size of
+	// the rune, but we are interested in the starting point
+	s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+	if s.srcPos.Column > 0 {
+		// common case: last character was not a '\n'
+		s.tokPos.Line = s.srcPos.Line
+		s.tokPos.Column = s.srcPos.Column
+	} else {
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		s.tokPos.Line = s.srcPos.Line - 1
+		s.tokPos.Column = s.lastLineLen
+	}
+
+	switch {
+	case isLetter(ch):
+		lit := s.scanIdentifier()
+		if lit == "true" || lit == "false" {
+			tok = token.BOOL
+		} else if lit == "null" {
+			tok = token.NULL
+		} else {
+			s.err("illegal char")
+		}
+	case isDecimal(ch):
+		tok = s.scanNumber(ch)
+	default:
+		switch ch {
+		case eof:
+			tok = token.EOF
+		case '"':
+			tok = token.STRING
+			s.scanString()
+		case '.':
+			tok = token.PERIOD
+			ch = s.peek()
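+// unquoteChar decodes the first character or escape sequence in s. It returns
+// the decoded rune, whether the rune needs a multibyte UTF-8 encoding, the
+// unconsumed tail of s, and any syntax error encountered.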
+			if isDecimal(ch) {
+				tok = token.FLOAT
+				ch = s.scanMantissa(ch)
+				ch = s.scanExponent(ch)
+			}
+		case '[':
+			tok = token.LBRACK
+		case ']':
+			tok = token.RBRACK
+		case '{':
+			tok = token.LBRACE
+		case '}':
+			tok = token.RBRACE
+		case ',':
+			tok = token.COMMA
+		case ':':
+			tok = token.COLON
+		case '-':
+			if isDecimal(s.peek()) {
+				ch := s.next()
+				tok = s.scanNumber(ch)
+			} else {
+				s.err("illegal char")
+			}
+		default:
+			s.err("illegal char: " + string(ch))
+		}
+	}
+
+	// finish token ending
+	s.tokEnd = s.srcPos.Offset
+
+	// create token literal
+	var tokenText string
+	if s.tokStart >= 0 {
+		tokenText = string(s.src[s.tokStart:s.tokEnd])
+	}
+	s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+	return token.Token{
+		Type: tok,
+		Pos:  s.tokPos,
+		Text: tokenText,
+	}
+}
+
+// scanNumber scans a JSON number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+	zero := ch == '0'
+	pos := s.srcPos
+
+	s.scanMantissa(ch)
+	ch = s.next() // seek forward
+	if ch == 'e' || ch == 'E' {
+		ch = s.scanExponent(ch)
+		return token.FLOAT
+	}
+
+	if ch == '.' {
+		ch = s.scanFraction(ch)
+		if ch == 'e' || ch == 'E' {
+			ch = s.next()
+			ch = s.scanExponent(ch)
+		}
+		return token.FLOAT
+	}
+
+	if ch != eof {
+		s.unread()
+	}
+
+	// if the number started with a zero and more digits followed, it's an error
+	if zero && pos != s.srcPos {
+		s.err("numbers cannot start with 0")
+	}
+
+	return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+	scanned := false
+	for isDecimal(ch) {
+		ch = s.next()
+		scanned = true
+	}
+
+	if scanned && ch != eof {
+		s.unread()
+	}
+	return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+	if ch == '.' {
+		ch = s.peek() // we peek just to see if we can move forward
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+	if ch == 'e' || ch == 'E' {
+		ch = s.next()
+		if ch == '-' || ch == '+' {
+			ch = s.next()
+		}
+		ch = s.scanMantissa(ch)
+	}
+	return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+	braces := 0
+	for {
+		// '"' opening already consumed
+		// read character after quote
+		ch := s.next()
+
+		if ch == '\n' || ch < 0 || ch == eof {
+			s.err("literal not terminated")
+			return
+		}
+
+		if ch == '"' {
+			break
+		}
+
+		// If we're going into a ${} then we can ignore quotes for a while
+		if braces == 0 && ch == '$' && s.peek() == '{' {
+			braces++
+			s.next()
+		} else if braces > 0 && ch == '{' {
+			braces++
+		}
+		if braces > 0 && ch == '}' {
+			braces--
+		}
+
+		if ch == '\\' {
+			s.scanEscape()
+		}
+	}
+
+	return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+	// http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+		// nothing to do
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		// octal notation
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		// hexadecimal notation
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		// universal character name
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.err("illegal char escape")
+	}
+	return ch
+}
+
+// scanDigits scans up to n digits of the given base. For example, the octal
+// escape \123 would result in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.err("illegal char escape")
+	}
+
+	// we scanned all digits, put the last non-digit char back
+	s.unread()
+	return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+	offs := s.srcPos.Offset - s.lastCharLen
+	ch := s.next()
+	for isLetter(ch) || isDigit(ch) || ch == '-' {
+		ch = s.next()
+	}
+
+	if ch != eof {
+		s.unread() // we got identifier, put back latest char
+	}
+
+	return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+	pos.Offset = s.srcPos.Offset - s.lastCharLen
+	switch {
+	case s.srcPos.Column > 0:
+		// common case: last character was not a '\n'
+		pos.Line = s.srcPos.Line
+		pos.Column = s.srcPos.Column
+	case s.lastLineLen > 0:
+		// last character was a '\n'
+		// (we cannot be at the beginning of the source
+		// since we have called next() at least once)
+		pos.Line = s.srcPos.Line - 1
+		pos.Column = s.lastLineLen
+	default:
+		// at the beginning of the source
+		pos.Line = 1
+		pos.Column = 1
+	}
+	return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr
+func (s *Scanner) err(msg string) {
+	s.ErrorCount++
+	pos := s.recentPosition()
+
+	if s.Error != nil {
+		s.Error(pos, msg)
+		return
+	}
+
+	fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+	return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+	return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+	return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 0000000000000000000000000000000000000000..59c1bb72d4a4ed0c773cc263f2e003d083b507fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A position is valid if the line number is > 0.
+type Pos struct {
+	Filename string // filename, if any
+	Offset   int    // offset, starting at 0
+	Line     int    // line number, starting at 1
+	Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//	file:line:column    valid position with file name
+//	line:column         valid position without file name
+//	file                invalid position with file name
+//	-                   invalid position without file name
+func (p Pos) String() string {
+	s := p.Filename
+	if p.IsValid() {
+		if s != "" {
+			s += ":"
+		}
+		s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+	}
+	if s == "" {
+		s = "-"
+	}
+	return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+	return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+	return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 0000000000000000000000000000000000000000..95a0c3eee653a0a666b16ffa8a8db565636595d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+	"fmt"
+	"strconv"
+
+	hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single JSON token which can be obtained via the Scanner
+type Token struct {
+	Type Type
+	Pos  Pos
+	Text string
+}
+
+// Type is the set of lexical tokens of HCL's JSON syntax
+type Type int
+
+const (
+	// Special tokens
+	ILLEGAL Type = iota
+	EOF
+
+	identifier_beg
+	literal_beg
+	NUMBER // 12345
+	FLOAT  // 123.45
+	BOOL   // true,false
+	STRING // "abc"
+	NULL   // null
+	literal_end
+	identifier_end
+
+	operator_beg
+	LBRACK // [
+	LBRACE // {
+	COMMA  // ,
+	PERIOD // .
+	COLON  // :
+
+	RBRACK // ]
+	RBRACE // }
+
+	operator_end
+)
+
+var tokens = [...]string{
+	ILLEGAL: "ILLEGAL",
+
+	EOF: "EOF",
+
+	NUMBER: "NUMBER",
+	FLOAT:  "FLOAT",
+	BOOL:   "BOOL",
+	STRING: "STRING",
+	NULL:   "NULL",
+
+	LBRACK: "LBRACK",
+	LBRACE: "LBRACE",
+	COMMA:  "COMMA",
+	PERIOD: "PERIOD",
+	COLON:  "COLON",
+
+	RBRACK: "RBRACK",
+	RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+	s := ""
+	if 0 <= t && t < Type(len(tokens)) {
+		s = tokens[t]
+	}
+	if s == "" {
+		s = "token(" + strconv.Itoa(int(t)) + ")"
+	}
+	return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token, including its
+// position, type, and literal text.
+func (t Token) String() string {
+	return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+	switch t.Type {
+	case BOOL:
+		return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+	case FLOAT:
+		return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+	case NULL:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+	case NUMBER:
+		return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+	case STRING:
+		return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+	default:
+		panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 0000000000000000000000000000000000000000..d9993c2928a5f8d09236751bcff3ba8f19f80ce4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+	lexModeUnknown lexModeValue = iota
+	lexModeHcl
+	lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
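+//
+// For example, lexMode([]byte(`  {"a": 1}`)) returns lexModeJson, while
+// lexMode([]byte(`a = 1`)) returns lexModeHcl.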
+func lexMode(v []byte) lexModeValue {
+	var (
+		r      rune
+		w      int
+		offset int
+	)
+
+	for {
+		r, w = utf8.DecodeRune(v[offset:])
+		offset += w
+		if unicode.IsSpace(r) {
+			continue
+		}
+		if r == '{' {
+			return lexModeJson
+		}
+		break
+	}
+
+	return lexModeHcl
+}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 0000000000000000000000000000000000000000..1fca53c4cee2a727fba5fcc5e5c7d1d94411847d
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	hclParser "github.com/hashicorp/hcl/hcl/parser"
+	jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST.
+//
+// Input can be either JSON or HCL.
+func ParseBytes(in []byte) (*ast.File, error) {
+	return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST.
+func ParseString(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+	switch lexMode(in) {
+	case lexModeHcl:
+		return hclParser.Parse(in)
+	case lexModeJson:
+		return jsonParser.Parse(in)
+	}
+
+	return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
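+//
+// A minimal usage sketch (error handling elided):
+//
+//	file, err := hcl.Parse(`foo = "bar"`)
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	_ = file // *ast.File; for HCL input its Node is an *ast.ObjectList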
+func Parse(input string) (*ast.File, error) {
+	return parse([]byte(input))
+}
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
new file mode 100644
index 0000000000000000000000000000000000000000..b0d98d81de486aa41ed6b06bc003e61c8c9af2c1
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -0,0 +1,104 @@
+## Changelog
+
+### [1.7.4](https://github.com/magiconair/properties/tree/v1.7.4) - 31 Oct 2017
+
 * [Issue #23](https://github.com/magiconair/properties/issues/23): Ignore blank lines containing only whitespace
+ * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled
+   Thanks to @mgurov for the fix.
+
+### [1.7.3](https://github.com/magiconair/properties/tree/v1.7.3) - 10 Jul 2017
+
+ * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
+ * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
+
+### [1.7.2](https://github.com/magiconair/properties/tree/v1.7.2) - 20 Mar 2017
+
+ * [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
+ * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)
+
+### [1.7.1](https://github.com/magiconair/properties/tree/v1.7.1) - 13 Jan 2017
+
+ * [Issue #14](https://github.com/magiconair/properties/issues/14): Decouple TestLoadExpandedFile from `$USER`
+ * [PR #12](https://github.com/magiconair/properties/pull/12): Load from files and URLs
+ * [PR #16](https://github.com/magiconair/properties/pull/16): Keep gofmt happy
+ * [PR #18](https://github.com/magiconair/properties/pull/18): Fix Delete() function
+
+### [1.7.0](https://github.com/magiconair/properties/tree/v1.7.0) - 20 Mar 2016
+
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) functions to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) functions to load properties from a UTF8 string.
+ * [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
+
+### [1.6.0](https://github.com/magiconair/properties/tree/v1.6.0) - 11 Dec 2015
+
+ * Add [Decode](http://godoc.org/github.com/magiconair/properties#Properties.Decode) method to populate struct from properties via tags.
+
+### [1.5.6](https://github.com/magiconair/properties/tree/v1.5.6) - 18 Oct 2015
+
+ * Vendored in gopkg.in/check.v1
+
+### [1.5.5](https://github.com/magiconair/properties/tree/v1.5.5) - 31 Jul 2015
+
+ * [PR #6](https://github.com/magiconair/properties/pull/6): Add [Delete](http://godoc.org/github.com/magiconair/properties#Properties.Delete) method to remove keys including comments. (@gerbenjacobs)
+
+### [1.5.4](https://github.com/magiconair/properties/tree/v1.5.4) - 23 Jun 2015
+
+ * [Issue #5](https://github.com/magiconair/properties/issues/5): Allow disabling of property expansion [DisableExpansion](http://godoc.org/github.com/magiconair/properties#Properties.DisableExpansion). When property expansion is disabled Properties become a simple key/value store and don't check for circular references.
+
+### [1.5.3](https://github.com/magiconair/properties/tree/v1.5.3) - 02 Jun 2015
+
+ * [Issue #4](https://github.com/magiconair/properties/issues/4): Maintain key order in [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) and [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp)
+
+### [1.5.2](https://github.com/magiconair/properties/tree/v1.5.2) - 10 Apr 2015
+
+ * [Issue #3](https://github.com/magiconair/properties/issues/3): Don't print comments in [WriteComment()](http://godoc.org/github.com/magiconair/properties#Properties.WriteComment) if they are all empty
+ * Add clickable links to README
+
+### [1.5.1](https://github.com/magiconair/properties/tree/v1.5.1) - 08 Dec 2014
+
+ * Added [GetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.GetParsedDuration) and [MustGetParsedDuration()](http://godoc.org/github.com/magiconair/properties#Properties.MustGetParsedDuration) for values specified in a format compatible with
+   [time.ParseDuration()](http://golang.org/pkg/time/#ParseDuration).
+
+### [1.5.0](https://github.com/magiconair/properties/tree/v1.5.0) - 18 Nov 2014
+
+ * Added support for single and multi-line comments (reading, writing and updating)
+ * The order of keys is now preserved
+ * Calling [Set()](http://godoc.org/github.com/magiconair/properties#Properties.Set) with an empty key now silently ignores the call and does not create a new entry
+ * Added a [MustSet()](http://godoc.org/github.com/magiconair/properties#Properties.MustSet) method
+ * Migrated test library from launchpad.net/gocheck to [gopkg.in/check.v1](http://gopkg.in/check.v1)
+
+### [1.4.2](https://github.com/magiconair/properties/tree/v1.4.2) - 15 Nov 2014
+
+ * [Issue #2](https://github.com/magiconair/properties/issues/2): Fixed goroutine leak in parser which created two lexers but cleaned up only one
+
+### [1.4.1](https://github.com/magiconair/properties/tree/v1.4.1) - 13 Nov 2014
+
+ * [Issue #1](https://github.com/magiconair/properties/issues/1): Fixed bug in Keys() method which returned an empty string
+
+### [1.4.0](https://github.com/magiconair/properties/tree/v1.4.0) - 23 Sep 2014
+
+ * Added [Keys()](http://godoc.org/github.com/magiconair/properties#Properties.Keys) to get the keys
+ * Added [Filter()](http://godoc.org/github.com/magiconair/properties#Properties.Filter), [FilterRegexp()](http://godoc.org/github.com/magiconair/properties#Properties.FilterRegexp) and [FilterPrefix()](http://godoc.org/github.com/magiconair/properties#Properties.FilterPrefix) to get a subset of the properties
+
+### [1.3.0](https://github.com/magiconair/properties/tree/v1.3.0) - 18 Mar 2014
+
+* Added support for time.Duration
+* Made MustXXX() failure behavior configurable (log.Fatal, panic or custom)
+* Changed default of MustXXX() failure from panic to log.Fatal
+
+### [1.2.0](https://github.com/magiconair/properties/tree/v1.2.0) - 05 Mar 2014
+
+* Added MustGet... functions
+* Added support for int and uint with range checks on 32 bit platforms
+
+### [1.1.0](https://github.com/magiconair/properties/tree/v1.1.0) - 20 Jan 2014
+
+* Renamed from goproperties to properties
+* Added support for expansion of environment vars in
+  filenames and value expressions
+* Fixed bug where value expressions were not at the
+  start of the string
+
+### [1.0.0](https://github.com/magiconair/properties/tree/v1.0.0) - 7 Jan 2014
+
+* Initial release
diff --git a/vendor/github.com/magiconair/properties/LICENSE b/vendor/github.com/magiconair/properties/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..7eab43b6bf9dcf3fff2279bbfc3ea26e44ddded3
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/LICENSE
@@ -0,0 +1,25 @@
+goproperties - properties file decoder for Go
+
+Copyright (c) 2013-2014 - Frank Schroeder
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..258fbb2b586958921cf61bdbf16dc05b4cc9b2e9
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/README.md
@@ -0,0 +1,100 @@
+Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties)
+========
+
+#### Current version: 1.7.4
+
+properties is a Go library for reading and writing properties files.
+
+It supports reading from multiple files or URLs and Spring-style recursive
+property expansion of expressions like `${key}` to their corresponding value.
+Value expressions can refer to other keys like in `${key}` or to environment
+variables like in `${USER}`.  Filenames can also contain environment variables
+like in `/home/${USER}/myapp.properties`.
+
+Properties can be decoded into structs, maps, arrays and values through
+struct tags.
+
+Comments and the order of keys are preserved. Comments can be modified
+and can be written to the output.
+
+The properties library supports both ISO-8859-1 and UTF-8 encoded data.
+
+Starting from version 1.3.0 the behavior of the MustXXX() functions is
+configurable by providing a custom `ErrorHandler` function. The default has
+changed from `panic` to `log.Fatal` but this is configurable and custom
+error handling functions can be provided. See the package documentation for
+details.
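+
+For example, to restore the pre-1.3.0 panic behavior (a minimal sketch using
+the package's built-in `PanicHandler`):
+
+```go
+properties.ErrorHandler = properties.PanicHandler
+```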
+
+Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties)   [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
+
+Getting Started
+---------------
+
+```go
+import (
+	"flag"
+	"log"
+	"time"
+
+	"github.com/magiconair/properties"
+)
+
+func main() {
+	// init from a file
+	p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
+
+	// or multiple files
+	p = properties.MustLoadFiles([]string{
+			"${HOME}/config.properties",
+			"${HOME}/config-${USER}.properties",
+		}, properties.UTF8, true)
+
+	// or from a map
+	p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})
+
+	// or from a string
+	p = properties.MustLoadString("key=value\nabc=def")
+
+	// or from a URL
+	p = properties.MustLoadURL("http://host/path")
+
+	// or from multiple URLs
+	p = properties.MustLoadURLs([]string{
+			"http://host/config",
+			"http://host/config-${USER}",
+		}, true)
+
+	// or from flags
+	p.MustFlag(flag.CommandLine)
+
+	// get values through getters
+	host := p.MustGetString("host")
+	port := p.GetInt("port", 8080)
+
+	// or through Decode
+	type Config struct {
+		Host    string        `properties:"host"`
+		Port    int           `properties:"port,default=9000"`
+		Accept  []string      `properties:"accept,default=image/png;image/gif"`
+		Timeout time.Duration `properties:"timeout,default=5s"`
+	}
+	var cfg Config
+	if err := p.Decode(&cfg); err != nil {
+		log.Fatal(err)
+	}
+}
+
+```
+
+Installation and Upgrade
+------------------------
+
+```
+$ go get -u github.com/magiconair/properties
+```
+
+License
+-------
+
+2-clause BSD license. See the [LICENSE](https://github.com/magiconair/properties/blob/master/LICENSE) file for details.
+
+ToDo
+----
+* Dump contents with passwords and secrets obscured
diff --git a/vendor/github.com/magiconair/properties/decode.go b/vendor/github.com/magiconair/properties/decode.go
new file mode 100644
index 0000000000000000000000000000000000000000..0a961bb0443173d33c1261a5a262bb4f37e4baa3
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/decode.go
@@ -0,0 +1,289 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Decode assigns property values to exported fields of a struct.
+//
+// Decode traverses v recursively and returns an error if a value cannot be
+// converted to the field type or a required value is missing for a field.
+//
+// The following type dependent decodings are used:
+//
+// String, boolean, numeric fields have the value of the property key assigned.
+// The property key name is the name of the field. A different key and a default
+// value can be set in the field's tag. Fields without default value are
+// required. If the value cannot be converted to the field type an error is
+// returned.
+//
+// time.Duration fields have the result of time.ParseDuration() assigned.
+//
+// time.Time fields have the value of time.Parse() assigned. The default layout
+// is time.RFC3339 but can be set in the field's tag.
+//
+// Arrays and slices of string, boolean, numeric, time.Duration and time.Time
+// fields have the value interpreted as a comma separated list of values. The
+// individual values are trimmed of whitespace and empty values are ignored. A
+// default value can be provided as a semicolon separated list in the field's
+// tag.
+//
+// Struct fields are decoded recursively using the field name plus "." as
+// prefix. The prefix (without dot) can be overridden in the field's tag.
+// Default values are not supported in the field's tag. Specify them on the
+// fields of the inner struct instead.
+//
+// Map fields must have a key of type string and are decoded recursively by
+// using the field's name plus "." as prefix and the next element of the key
+// name as map key. The prefix (without dot) can be overridden in the field's
+// tag. Default values are not supported.
+//
+// Examples:
+//
+//     // Field is ignored.
+//     Field int `properties:"-"`
+//
+//     // Field is assigned value of 'Field'.
+//     Field int
+//
+//     // Field is assigned value of 'myName'.
+//     Field int `properties:"myName"`
+//
+//     // Field is assigned value of key 'myName' and has a default
+//     // value 15 if the key does not exist.
+//     Field int `properties:"myName,default=15"`
+//
+//     // Field is assigned value of key 'Field' and has a default
+//     // value 15 if the key does not exist.
+//     Field int `properties:",default=15"`
+//
+//     // Field is assigned value of key 'date' and the date
+//     // is in format 2006-01-02
+//     Field time.Time `properties:"date,layout=2006-01-02"`
+//
+//     // Field is assigned the non-empty and whitespace trimmed
+//     // values of key 'Field' split by commas.
+//     Field []string
+//
+//     // Field is assigned the non-empty and whitespace trimmed
+//     // values of key 'Field' split by commas and has a default
+//     // value ["a", "b", "c"] if the key does not exist.
+//     Field []string `properties:",default=a;b;c"`
+//
+//     // Field is decoded recursively with "Field." as key prefix.
+//     Field SomeStruct
+//
+//     // Field is decoded recursively with "myName." as key prefix.
+//     Field SomeStruct `properties:"myName"`
+//
+//     // Field is decoded recursively with "Field." as key prefix
+//     // and the next dotted element of the key as map key.
+//     Field map[string]string
+//
+//     // Field is decoded recursively with "myName." as key prefix
+//     // and the next dotted element of the key as map key.
+//     Field map[string]string `properties:"myName"`
+func (p *Properties) Decode(x interface{}) error {
+	t, v := reflect.TypeOf(x), reflect.ValueOf(x)
+	if t.Kind() != reflect.Ptr || v.Elem().Type().Kind() != reflect.Struct {
+		return fmt.Errorf("not a pointer to struct: %s", t)
+	}
+	if err := dec(p, "", nil, nil, v); err != nil {
+		return err
+	}
+	return nil
+}
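+
+// A minimal end-to-end sketch of Decode (the keys "host" and "port" are
+// assumed example data, not part of this package):
+//
+//     p := MustLoadString("host=example.com\nport=8080")
+//     type config struct {
+//         Host string `properties:"host"`
+//         Port int    `properties:"port,default=9000"`
+//     }
+//     var c config
+//     if err := p.Decode(&c); err != nil {
+//         // handle error
+//     }
+//     // c.Host == "example.com", c.Port == 8080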
+
+func dec(p *Properties, key string, def *string, opts map[string]string, v reflect.Value) error {
+	t := v.Type()
+
+	// value returns the property value for key or the default if provided.
+	value := func() (string, error) {
+		if val, ok := p.Get(key); ok {
+			return val, nil
+		}
+		if def != nil {
+			return *def, nil
+		}
+		return "", fmt.Errorf("missing required key %s", key)
+	}
+
+	// conv converts a string to a value of the given type.
+	conv := func(s string, t reflect.Type) (val reflect.Value, err error) {
+		var v interface{}
+
+		switch {
+		case isDuration(t):
+			v, err = time.ParseDuration(s)
+
+		case isTime(t):
+			layout := opts["layout"]
+			if layout == "" {
+				layout = time.RFC3339
+			}
+			v, err = time.Parse(layout, s)
+
+		case isBool(t):
+			v, err = boolVal(s), nil
+
+		case isString(t):
+			v, err = s, nil
+
+		case isFloat(t):
+			v, err = strconv.ParseFloat(s, 64)
+
+		case isInt(t):
+			v, err = strconv.ParseInt(s, 10, 64)
+
+		case isUint(t):
+			v, err = strconv.ParseUint(s, 10, 64)
+
+		default:
+			return reflect.Zero(t), fmt.Errorf("unsupported type %s", t)
+		}
+		if err != nil {
+			return reflect.Zero(t), err
+		}
+		return reflect.ValueOf(v).Convert(t), nil
+	}
+
+	// keydef returns the property key and the default value based on the
+	// name of the struct field and the options in the tag.
+	keydef := func(f reflect.StructField) (string, *string, map[string]string) {
+		_key, _opts := parseTag(f.Tag.Get("properties"))
+
+		var _def *string
+		if d, ok := _opts["default"]; ok {
+			_def = &d
+		}
+		if _key != "" {
+			return _key, _def, _opts
+		}
+		return f.Name, _def, _opts
+	}
+
+	switch {
+	case isDuration(t) || isTime(t) || isBool(t) || isString(t) || isFloat(t) || isInt(t) || isUint(t):
+		s, err := value()
+		if err != nil {
+			return err
+		}
+		val, err := conv(s, t)
+		if err != nil {
+			return err
+		}
+		v.Set(val)
+
+	case isPtr(t):
+		return dec(p, key, def, opts, v.Elem())
+
+	case isStruct(t):
+		for i := 0; i < v.NumField(); i++ {
+			fv := v.Field(i)
+			fk, def, opts := keydef(t.Field(i))
+			if !fv.CanSet() {
+				return fmt.Errorf("cannot set %s", t.Field(i).Name)
+			}
+			if fk == "-" {
+				continue
+			}
+			if key != "" {
+				fk = key + "." + fk
+			}
+			if err := dec(p, fk, def, opts, fv); err != nil {
+				return err
+			}
+		}
+		return nil
+
+	case isArray(t):
+		val, err := value()
+		if err != nil {
+			return err
+		}
+		vals := split(val, ";")
+		a := reflect.MakeSlice(t, 0, len(vals))
+		for _, s := range vals {
+			val, err := conv(s, t.Elem())
+			if err != nil {
+				return err
+			}
+			a = reflect.Append(a, val)
+		}
+		v.Set(a)
+
+	case isMap(t):
+		valT := t.Elem()
+		m := reflect.MakeMap(t)
+		for postfix := range p.FilterStripPrefix(key + ".").m {
+			pp := strings.SplitN(postfix, ".", 2)
+			mk, mv := pp[0], reflect.New(valT)
+			if err := dec(p, key+"."+mk, nil, nil, mv); err != nil {
+				return err
+			}
+			m.SetMapIndex(reflect.ValueOf(mk), mv.Elem())
+		}
+		v.Set(m)
+
+	default:
+		return fmt.Errorf("unsupported type %s", t)
+	}
+	return nil
+}
+
+// split splits a string on sep, trims whitespace of elements
+// and omits empty elements
+func split(s string, sep string) []string {
+	var a []string
+	for _, v := range strings.Split(s, sep) {
+		if v = strings.TrimSpace(v); v != "" {
+			a = append(a, v)
+		}
+	}
+	return a
+}
+
+// parseTag parses a "key,k=v,k=v,..." tag into the key and an options map.
+func parseTag(tag string) (key string, opts map[string]string) {
+	opts = map[string]string{}
+	for i, s := range strings.Split(tag, ",") {
+		if i == 0 {
+			key = s
+			continue
+		}
+
+		pp := strings.SplitN(s, "=", 2)
+		if len(pp) == 1 {
+			opts[pp[0]] = ""
+		} else {
+			opts[pp[0]] = pp[1]
+		}
+	}
+	return key, opts
+}
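+
+// For example, parseTag(`myName,default=15,layout=2006-01-02`) returns
+// key "myName" and opts = map[string]string{"default": "15", "layout": "2006-01-02"}.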
+
+func isArray(t reflect.Type) bool    { return t.Kind() == reflect.Array || t.Kind() == reflect.Slice }
+func isBool(t reflect.Type) bool     { return t.Kind() == reflect.Bool }
+func isDuration(t reflect.Type) bool { return t == reflect.TypeOf(time.Second) }
+func isMap(t reflect.Type) bool      { return t.Kind() == reflect.Map }
+func isPtr(t reflect.Type) bool      { return t.Kind() == reflect.Ptr }
+func isString(t reflect.Type) bool   { return t.Kind() == reflect.String }
+func isStruct(t reflect.Type) bool   { return t.Kind() == reflect.Struct }
+func isTime(t reflect.Type) bool     { return t == reflect.TypeOf(time.Time{}) }
+func isFloat(t reflect.Type) bool {
+	return t.Kind() == reflect.Float32 || t.Kind() == reflect.Float64
+}
+func isInt(t reflect.Type) bool {
+	return t.Kind() == reflect.Int || t.Kind() == reflect.Int8 || t.Kind() == reflect.Int16 || t.Kind() == reflect.Int32 || t.Kind() == reflect.Int64
+}
+func isUint(t reflect.Type) bool {
+	return t.Kind() == reflect.Uint || t.Kind() == reflect.Uint8 || t.Kind() == reflect.Uint16 || t.Kind() == reflect.Uint32 || t.Kind() == reflect.Uint64
+}
diff --git a/vendor/github.com/magiconair/properties/doc.go b/vendor/github.com/magiconair/properties/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..36c8368089fa281cb1ea5964ecf69ad62c8325a5
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/doc.go
@@ -0,0 +1,156 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package properties provides functions for reading and writing
+// ISO-8859-1 and UTF-8 encoded .properties files and has
+// support for recursive property expansion.
+//
+// Java properties files are ISO-8859-1 encoded and use Unicode
+// literals for characters outside the ISO character set. Unicode
+// literals can be used in UTF-8 encoded properties files but
+// aren't necessary.
+//
+// To load a single properties file use MustLoadFile():
+//
+//   p := properties.MustLoadFile(filename, properties.UTF8)
+//
+// To load multiple properties files use MustLoadFiles()
+// which loads the files in the given order and merges the
+// result. Missing properties files can be ignored if the
+// 'ignoreMissing' flag is set to true.
+//
+// Filenames can contain environment variables which are expanded
+// before loading.
+//
+//   f1 := "/etc/myapp/myapp.conf"
+//   f2 := "/home/${USER}/myapp.conf"
+//   p := MustLoadFiles([]string{f1, f2}, properties.UTF8, true)
+//
+// All of the different key/value delimiters ' ', ':' and '=' are
+// supported as well as the comment characters '!' and '#' and
+// multi-line values.
+//
+//   ! this is a comment
+//   # and so is this
+//
+//   # the following expressions are equal
+//   key value
+//   key=value
+//   key:value
+//   key = value
+//   key : value
+//   key = val\
+//         ue
+//
+// Properties stores all comments preceding a key and provides
+// GetComments() and SetComments() methods to retrieve and
+// update them. The convenience functions GetComment() and
+// SetComment() allow access to the last comment. The
+// WriteComment() method writes properties files including
+// the comments and with the keys in the original order.
+// This can be used for sanitizing properties files.
+//
+// Property expansion is recursive and circular references
+// and malformed expressions are not allowed and cause an
+// error. Expansion of environment variables is supported.
+//
+//   # standard property
+//   key = value
+//
+//   # property expansion: key2 = value
+//   key2 = ${key}
+//
+//   # recursive expansion: key3 = value
+//   key3 = ${key2}
+//
+//   # circular reference (error)
+//   key = ${key}
+//
+//   # malformed expression (error)
+//   key = ${ke
+//
+//   # refers to the user's home dir
+//   home = ${HOME}
+//
+//   # local key takes precedence over env var: u = foo
+//   USER = foo
+//   u = ${USER}
+//
+// The default property expansion format is ${key} but can be
+// changed by setting different pre- and postfix values on the
+// Properties object.
+//
+//   p := properties.NewProperties()
+//   p.Prefix = "#["
+//   p.Postfix = "]#"
+//
+// Properties provides convenience functions for getting typed
+// values with default values if the key does not exist or the
+// type conversion failed.
+//
+//   # Returns true if the value is either "1", "on", "yes" or "true"
+//   # Returns false for every other value and the default value if
+//   # the key does not exist.
+//   v = p.GetBool("key", false)
+//
+//   # Returns the value if the key exists and the format conversion
+//   # was successful. Otherwise, the default value is returned.
+//   v = p.GetInt64("key", 999)
+//   v = p.GetUint64("key", 999)
+//   v = p.GetFloat64("key", 123.0)
+//   v = p.GetString("key", "def")
+//   v = p.GetDuration("key", 999)
+//
+// As an alternative, properties may be applied with the standard
+// library's flag implementation at any time.
+//
+//   # Standard configuration
+//   v = flag.Int("key", 999, "help message")
+//   flag.Parse()
+//
+//   # Merge p into the flag set
+//   p.MustFlag(flag.CommandLine)
+//
+// Properties provides several MustXXX() convenience functions
+// which will terminate the app if an error occurs. The behavior
+// of the failure is configurable and the default is to call
+// log.Fatal(err). To have the MustXXX() functions panic instead
+// of logging the error set a different ErrorHandler before
+// you use the Properties package.
+//
+//   properties.ErrorHandler = properties.PanicHandler
+//
+//   # Will panic instead of logging an error
+//   p := properties.MustLoadFile("config.properties")
+//
+// You can also provide your own ErrorHandler function. The only requirement
+// is that the error handler function must exit after handling the error.
+//
+//   properties.ErrorHandler = func(err error) {
+//       fmt.Println(err)
+//       os.Exit(1)
+//   }
+//
+//   # Will write to stdout and then exit
+//   p := properties.MustLoadFile("config.properties")
+//
+// Properties can also be loaded into a struct via the `Decode`
+// method, e.g.
+//
+//   type S struct {
+//       A string        `properties:"a,default=foo"`
+//       D time.Duration `properties:"timeout,default=5s"`
+//       E time.Time     `properties:"expires,layout=2006-01-02,default=2015-01-01"`
+//   }
+//
+// See `Decode()` method for the full documentation.
+//
+// The following documents provide a description of the properties
+// file format.
+//
+// http://en.wikipedia.org/wiki/.properties
+//
+// http://docs.oracle.com/javase/7/docs/api/java/util/Properties.html#load%28java.io.Reader%29
+//
+package properties
diff --git a/vendor/github.com/magiconair/properties/integrate.go b/vendor/github.com/magiconair/properties/integrate.go
new file mode 100644
index 0000000000000000000000000000000000000000..0d775e0350bf62555e07bcdd7cf0a6ddaf0f09ab
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/integrate.go
@@ -0,0 +1,34 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import "flag"
+
+// MustFlag sets the values of flags in dst that were not set on the
+// command line (i.e. were skipped by dst.Parse) when p contains a key
+// matching flag.Flag.Name.
+//
+// Its use is recommended with command line arguments as in:
+// 	flag.Parse()
+// 	p.MustFlag(flag.CommandLine)
+func (p *Properties) MustFlag(dst *flag.FlagSet) {
+	m := make(map[string]*flag.Flag)
+	dst.VisitAll(func(f *flag.Flag) {
+		m[f.Name] = f
+	})
+	dst.Visit(func(f *flag.Flag) {
+		delete(m, f.Name) // overridden
+	})
+
+	for name, f := range m {
+		v, ok := p.Get(name)
+		if !ok {
+			continue
+		}
+
+		if err := f.Value.Set(v); err != nil {
+			ErrorHandler(err)
+		}
+	}
+}
diff --git a/vendor/github.com/magiconair/properties/lex.go b/vendor/github.com/magiconair/properties/lex.go
new file mode 100644
index 0000000000000000000000000000000000000000..c63fcc60d7acc5e2bdda1a60f3ecdcc8aa076b6a
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/lex.go
@@ -0,0 +1,407 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// Parts of the lexer are from the template/text/parser package
+// For these parts the following applies:
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file of the go 1.2
+// distribution.
+
+package properties
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// item represents a token or text string returned from the scanner.
+type item struct {
+	typ itemType // The type of this item.
+	pos int      // The starting position, in bytes, of this item in the input string.
+	val string   // The value of this item.
+}
+
+func (i item) String() string {
+	switch {
+	case i.typ == itemEOF:
+		return "EOF"
+	case i.typ == itemError:
+		return i.val
+	case len(i.val) > 10:
+		return fmt.Sprintf("%.10q...", i.val)
+	}
+	return fmt.Sprintf("%q", i.val)
+}
+
+// itemType identifies the type of lex items.
+type itemType int
+
+const (
+	itemError itemType = iota // error occurred; value is text of error
+	itemEOF
+	itemKey     // a key
+	itemValue   // a value
+	itemComment // a comment
+)
+
+// defines a constant for EOF
+const eof = -1
+
+// permitted whitespace characters: space, FF and TAB
+const whitespace = " \f\t"
+
+// stateFn represents the state of the scanner as a function that returns the next state.
+type stateFn func(*lexer) stateFn
+
+// lexer holds the state of the scanner.
+type lexer struct {
+	input   string    // the string being scanned
+	state   stateFn   // the next lexing function to enter
+	pos     int       // current position in the input
+	start   int       // start position of this item
+	width   int       // width of last rune read from input
+	lastPos int       // position of most recent item returned by nextItem
+	runes   []rune    // scanned runes for this item
+	items   chan item // channel of scanned items
+}
+
+// next returns the next rune in the input.
+func (l *lexer) next() rune {
+	if l.pos >= len(l.input) {
+		l.width = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(l.input[l.pos:])
+	l.width = w
+	l.pos += l.width
+	return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (l *lexer) peek() rune {
+	r := l.next()
+	l.backup()
+	return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (l *lexer) backup() {
+	l.pos -= l.width
+}
+
+// emit passes an item back to the client.
+func (l *lexer) emit(t itemType) {
+	i := item{t, l.start, string(l.runes)}
+	l.items <- i
+	l.start = l.pos
+	l.runes = l.runes[:0]
+}
+
+// ignore skips over the pending input before this point.
+func (l *lexer) ignore() {
+	l.start = l.pos
+}
+
+// appendRune appends the rune to the current value.
+func (l *lexer) appendRune(r rune) {
+	l.runes = append(l.runes, r)
+}
+
+// accept consumes the next rune if it's from the valid set.
+func (l *lexer) accept(valid string) bool {
+	if strings.ContainsRune(valid, l.next()) {
+		return true
+	}
+	l.backup()
+	return false
+}
+
+// acceptRun consumes a run of runes from the valid set.
+func (l *lexer) acceptRun(valid string) {
+	for strings.ContainsRune(valid, l.next()) {
+	}
+	l.backup()
+}
+
+// acceptRunUntil consumes a run of runes up to a terminator.
+func (l *lexer) acceptRunUntil(term rune) {
+	for term != l.next() {
+	}
+	l.backup()
+}
+
+// isNotEmpty returns true if the current parsed text is not empty.
+func (l *lexer) isNotEmpty() bool {
+	return l.pos > l.start
+}
+
+// lineNumber reports which line we're on, based on the position of
+// the previous item returned by nextItem. Doing it this way
+// means we don't have to worry about peek double counting.
+func (l *lexer) lineNumber() int {
+	return 1 + strings.Count(l.input[:l.lastPos], "\n")
+}
+
+// errorf returns an error token and terminates the scan by passing
+// back a nil pointer that will be the next state, terminating l.nextItem.
+func (l *lexer) errorf(format string, args ...interface{}) stateFn {
+	l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
+	return nil
+}
+
+// nextItem returns the next item from the input.
+func (l *lexer) nextItem() item {
+	i := <-l.items
+	l.lastPos = i.pos
+	return i
+}
+
+// lex creates a new scanner for the input string.
+func lex(input string) *lexer {
+	l := &lexer{
+		input: input,
+		items: make(chan item),
+		runes: make([]rune, 0, 32),
+	}
+	go l.run()
+	return l
+}
+
+// run runs the state machine for the lexer.
+func (l *lexer) run() {
+	for l.state = lexBeforeKey(l); l.state != nil; {
+		l.state = l.state(l)
+	}
+}
+
+// state functions
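+//
+// They form a cycle: lexBeforeKey -> lexKey -> lexBeforeValue -> lexValue
+// -> lexBeforeKey, with lexComment as a side path from lexBeforeKey, until
+// one of them emits itemEOF and returns nil.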
+
+// lexBeforeKey scans until a key begins.
+func lexBeforeKey(l *lexer) stateFn {
+	switch r := l.next(); {
+	case isEOF(r):
+		l.emit(itemEOF)
+		return nil
+
+	case isEOL(r):
+		l.ignore()
+		return lexBeforeKey
+
+	case isComment(r):
+		return lexComment
+
+	case isWhitespace(r):
+		l.ignore()
+		return lexBeforeKey
+
+	default:
+		l.backup()
+		return lexKey
+	}
+}
+
+// lexComment scans a comment line. The comment character has already been scanned.
+func lexComment(l *lexer) stateFn {
+	l.acceptRun(whitespace)
+	l.ignore()
+	for {
+		switch r := l.next(); {
+		case isEOF(r):
+			l.ignore()
+			l.emit(itemEOF)
+			return nil
+		case isEOL(r):
+			l.emit(itemComment)
+			return lexBeforeKey
+		default:
+			l.appendRune(r)
+		}
+	}
+}
+
+// lexKey scans the key up to a delimiter
+func lexKey(l *lexer) stateFn {
+	var r rune
+
+Loop:
+	for {
+		switch r = l.next(); {
+
+		case isEscape(r):
+			err := l.scanEscapeSequence()
+			if err != nil {
+				return l.errorf(err.Error())
+			}
+
+		case isEndOfKey(r):
+			l.backup()
+			break Loop
+
+		case isEOF(r):
+			break Loop
+
+		default:
+			l.appendRune(r)
+		}
+	}
+
+	if len(l.runes) > 0 {
+		l.emit(itemKey)
+	}
+
+	if isEOF(r) {
+		l.emit(itemEOF)
+		return nil
+	}
+
+	return lexBeforeValue
+}
+
+// lexBeforeValue scans the delimiter between key and value.
+// Leading and trailing whitespace is ignored.
+// We expect to be just after the key.
+func lexBeforeValue(l *lexer) stateFn {
+	l.acceptRun(whitespace)
+	l.accept(":=")
+	l.acceptRun(whitespace)
+	l.ignore()
+	return lexValue
+}
+
+// lexValue scans text until the end of the line. We expect to be just after the delimiter.
+func lexValue(l *lexer) stateFn {
+	for {
+		switch r := l.next(); {
+		case isEscape(r):
+			if isEOL(l.peek()) {
+				l.next()
+				l.acceptRun(whitespace)
+			} else {
+				err := l.scanEscapeSequence()
+				if err != nil {
+					return l.errorf(err.Error())
+				}
+			}
+
+		case isEOL(r):
+			l.emit(itemValue)
+			l.ignore()
+			return lexBeforeKey
+
+		case isEOF(r):
+			l.emit(itemValue)
+			l.emit(itemEOF)
+			return nil
+
+		default:
+			l.appendRune(r)
+		}
+	}
+}
+
+// scanEscapeSequence scans either one of the escaped characters
+// or a unicode literal. We expect to be after the escape character.
+func (l *lexer) scanEscapeSequence() error {
+	switch r := l.next(); {
+
+	case isEscapedCharacter(r):
+		l.appendRune(decodeEscapedCharacter(r))
+		return nil
+
+	case atUnicodeLiteral(r):
+		return l.scanUnicodeLiteral()
+
+	case isEOF(r):
+		return fmt.Errorf("premature EOF")
+
+	// silently drop the escape character and append the rune as is
+	default:
+		l.appendRune(r)
+		return nil
+	}
+}
+
+// scanUnicodeLiteral scans a unicode literal in the form \uXXXX. We expect to be after the \u.
+func (l *lexer) scanUnicodeLiteral() error {
+	// scan the digits
+	d := make([]rune, 4)
+	for i := 0; i < 4; i++ {
+		d[i] = l.next()
+		if d[i] == eof || !strings.ContainsRune("0123456789abcdefABCDEF", d[i]) {
+			return fmt.Errorf("invalid unicode literal")
+		}
+	}
+
+	// decode the digits into a rune
+	r, err := strconv.ParseInt(string(d), 16, 0)
+	if err != nil {
+		return err
+	}
+
+	l.appendRune(rune(r))
+	return nil
+}
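+
+// For example, the literal \u00e9 yields d = "00e9", which ParseInt
+// decodes to 0xE9, so the rune 'é' (U+00E9) is appended.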
+
+// decodeEscapedCharacter returns the unescaped rune. We expect to be after the escape character.
+func decodeEscapedCharacter(r rune) rune {
+	switch r {
+	case 'f':
+		return '\f'
+	case 'n':
+		return '\n'
+	case 'r':
+		return '\r'
+	case 't':
+		return '\t'
+	default:
+		return r
+	}
+}
+
+// atUnicodeLiteral reports whether we are at a unicode literal.
+// The escape character has already been consumed.
+func atUnicodeLiteral(r rune) bool {
+	return r == 'u'
+}
+
+// isComment reports whether we are at the start of a comment.
+func isComment(r rune) bool {
+	return r == '#' || r == '!'
+}
+
+// isEndOfKey reports whether the rune terminates the current key.
+func isEndOfKey(r rune) bool {
+	return strings.ContainsRune(" \f\t\r\n:=", r)
+}
+
+// isEOF reports whether we are at EOF.
+func isEOF(r rune) bool {
+	return r == eof
+}
+
+// isEOL reports whether we are at a new line character.
+func isEOL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+// isEscape reports whether the rune is the escape character which
+// prefixes unicode literals and other escaped characters.
+func isEscape(r rune) bool {
+	return r == '\\'
+}
+
+// isEscapedCharacter reports whether we are at one of the characters that need escaping.
+// The escape character has already been consumed.
+func isEscapedCharacter(r rune) bool {
+	return strings.ContainsRune(" :=fnrt", r)
+}
+
+// isWhitespace reports whether the rune is a whitespace character.
+func isWhitespace(r rune) bool {
+	return strings.ContainsRune(whitespace, r)
+}
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
new file mode 100644
index 0000000000000000000000000000000000000000..278cc2ea011d7675a74820acda6b7104af43902e
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -0,0 +1,241 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+)
+
+// Encoding specifies encoding of the input data.
+type Encoding uint
+
+const (
+	// UTF8 interprets the input data as UTF-8.
+	UTF8 Encoding = 1 << iota
+
+	// ISO_8859_1 interprets the input data as ISO-8859-1.
+	ISO_8859_1
+)
+
+// Load reads a buffer into a Properties struct.
+func Load(buf []byte, enc Encoding) (*Properties, error) {
+	return loadBuf(buf, enc)
+}
+
+// LoadString reads a UTF8 string into a Properties struct.
+func LoadString(s string) (*Properties, error) {
+	return loadBuf([]byte(s), UTF8)
+}
+
+// LoadMap creates a new Properties struct from a string map.
+func LoadMap(m map[string]string) *Properties {
+	p := NewProperties()
+	for k, v := range m {
+		p.Set(k, v)
+	}
+	return p
+}
+
+// LoadFile reads a file into a Properties struct.
+func LoadFile(filename string, enc Encoding) (*Properties, error) {
+	return loadAll([]string{filename}, enc, false)
+}
+
+// LoadFiles reads multiple files in the given order into
+// a Properties struct. If 'ignoreMissing' is true then
+// non-existent files will not be reported as error.
+func LoadFiles(filenames []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	return loadAll(filenames, enc, ignoreMissing)
+}
+
+// LoadURL reads the content of the URL into a Properties struct.
+//
+// The encoding is determined via the Content-Type header which
+// should be set to 'text/plain'. If the 'charset' parameter is
+// missing, or is set to 'iso-8859-1' or 'latin1', the encoding is set to
+// ISO-8859-1. If the 'charset' parameter is set to 'utf-8' the
+// encoding is set to UTF-8. A missing content type header is
+// interpreted as 'text/plain; charset=utf-8'.
+func LoadURL(url string) (*Properties, error) {
+	return loadAll([]string{url}, UTF8, false)
+}
+
+// LoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code will
+// not be reported as error. See LoadURL for the Content-Type header
+// and the encoding.
+func LoadURLs(urls []string, ignoreMissing bool) (*Properties, error) {
+	return loadAll(urls, UTF8, ignoreMissing)
+}
+
+// LoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding.
+func LoadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	return loadAll(names, enc, ignoreMissing)
+}
+
+// MustLoadString reads a UTF8 string into a Properties struct and
+// panics on error.
+func MustLoadString(s string) *Properties {
+	return must(LoadString(s))
+}
+
+// MustLoadFile reads a file into a Properties struct and
+// panics on error.
+func MustLoadFile(filename string, enc Encoding) *Properties {
+	return must(LoadFile(filename, enc))
+}
+
+// MustLoadFiles reads multiple files in the given order into
+// a Properties struct and panics on error. If 'ignoreMissing'
+// is true then non-existent files will not be reported as error.
+func MustLoadFiles(filenames []string, enc Encoding, ignoreMissing bool) *Properties {
+	return must(LoadFiles(filenames, enc, ignoreMissing))
+}
+
+// MustLoadURL reads the content of a URL into a Properties struct and
+// panics on error.
+func MustLoadURL(url string) *Properties {
+	return must(LoadURL(url))
+}
+
+// MustLoadURLs reads the content of multiple URLs in the given order into a
+// Properties struct and panics on error. If 'ignoreMissing' is true then a 404
+// status code will not be reported as error.
+func MustLoadURLs(urls []string, ignoreMissing bool) *Properties {
+	return must(LoadURLs(urls, ignoreMissing))
+}
+
+// MustLoadAll reads the content of multiple URLs or files in the given order into a
+// Properties struct. If 'ignoreMissing' is true then a 404 status code or missing file will
+// not be reported as error. Encoding sets the encoding for files. For the URLs please see
+// LoadURL for the Content-Type header and the encoding. It panics on error.
+func MustLoadAll(names []string, enc Encoding, ignoreMissing bool) *Properties {
+	return must(LoadAll(names, enc, ignoreMissing))
+}
+
+func loadBuf(buf []byte, enc Encoding) (*Properties, error) {
+	p, err := parse(convert(buf, enc))
+	if err != nil {
+		return nil, err
+	}
+	return p, p.check()
+}
+
+func loadAll(names []string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	result := NewProperties()
+	for _, name := range names {
+		n, err := expandName(name)
+		if err != nil {
+			return nil, err
+		}
+		var p *Properties
+		if strings.HasPrefix(n, "http://") || strings.HasPrefix(n, "https://") {
+			p, err = loadURL(n, ignoreMissing)
+		} else {
+			p, err = loadFile(n, enc, ignoreMissing)
+		}
+		if err != nil {
+			return nil, err
+		}
+		result.Merge(p)
+
+	}
+	return result, result.check()
+}
+
+func loadFile(filename string, enc Encoding, ignoreMissing bool) (*Properties, error) {
+	data, err := ioutil.ReadFile(filename)
+	if err != nil {
+		if ignoreMissing && os.IsNotExist(err) {
+			LogPrintf("properties: %s not found. skipping", filename)
+			return NewProperties(), nil
+		}
+		return nil, err
+	}
+	p, err := parse(convert(data, enc))
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+func loadURL(url string, ignoreMissing bool) (*Properties, error) {
+	resp, err := http.Get(url)
+	if err != nil {
+		return nil, fmt.Errorf("properties: error fetching %q. %s", url, err)
+	}
+	if resp.StatusCode == 404 && ignoreMissing {
+		LogPrintf("properties: %s returned %d. skipping", url, resp.StatusCode)
+		return NewProperties(), nil
+	}
+	if resp.StatusCode != 200 {
+		return nil, fmt.Errorf("properties: %s returned %d", url, resp.StatusCode)
+	}
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
+	}
+	if err = resp.Body.Close(); err != nil {
+		return nil, fmt.Errorf("properties: %s error reading response. %s", url, err)
+	}
+
+	ct := resp.Header.Get("Content-Type")
+	var enc Encoding
+	switch strings.ToLower(ct) {
+	case "text/plain", "text/plain; charset=iso-8859-1", "text/plain; charset=latin1":
+		enc = ISO_8859_1
+	case "", "text/plain; charset=utf-8":
+		enc = UTF8
+	default:
+		return nil, fmt.Errorf("properties: invalid content type %s", ct)
+	}
+
+	p, err := parse(convert(body, enc))
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
+
+func must(p *Properties, err error) *Properties {
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return p
+}
+
+// expandName expands ${ENV_VAR} expressions in a name.
+// If the environment variable does not exist then it will be replaced
+// with an empty string. Malformed expressions like "${ENV_VAR" will
+// be reported as error.
+func expandName(name string) (string, error) {
+	return expand(name, make(map[string]bool), "${", "}", make(map[string]string))
+}
+
+// convert interprets a byte buffer either as an ISO-8859-1 or UTF-8 encoded string.
+// For ISO-8859-1 we can convert each byte straight into a rune since the
+// first 256 unicode code points cover ISO-8859-1.
+func convert(buf []byte, enc Encoding) string {
+	switch enc {
+	case UTF8:
+		return string(buf)
+	case ISO_8859_1:
+		runes := make([]rune, len(buf))
+		for i, b := range buf {
+			runes[i] = rune(b)
+		}
+		return string(runes)
+	default:
+		ErrorHandler(fmt.Errorf("unsupported encoding %v", enc))
+	}
+	panic("ErrorHandler should exit")
+}
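+
+// For example, convert([]byte{0xE9}, ISO_8859_1) returns "é" (U+00E9),
+// while convert([]byte{0xE9}, UTF8) returns a string containing one
+// invalid UTF-8 byte.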
diff --git a/vendor/github.com/magiconair/properties/parser.go b/vendor/github.com/magiconair/properties/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..90f555cb93dc93eacaa911360b76fdb94754fd05
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/parser.go
@@ -0,0 +1,95 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"runtime"
+)
+
+type parser struct {
+	lex *lexer
+}
+
+func parse(input string) (properties *Properties, err error) {
+	p := &parser{lex: lex(input)}
+	defer p.recover(&err)
+
+	properties = NewProperties()
+	key := ""
+	comments := []string{}
+
+	for {
+		token := p.expectOneOf(itemComment, itemKey, itemEOF)
+		switch token.typ {
+		case itemEOF:
+			goto done
+		case itemComment:
+			comments = append(comments, token.val)
+			continue
+		case itemKey:
+			key = token.val
+			if _, ok := properties.m[key]; !ok {
+				properties.k = append(properties.k, key)
+			}
+		}
+
+		token = p.expectOneOf(itemValue, itemEOF)
+		if len(comments) > 0 {
+			properties.c[key] = comments
+			comments = []string{}
+		}
+		switch token.typ {
+		case itemEOF:
+			properties.m[key] = ""
+			goto done
+		case itemValue:
+			properties.m[key] = token.val
+		}
+	}
+
+done:
+	return properties, nil
+}
+
+func (p *parser) errorf(format string, args ...interface{}) {
+	format = fmt.Sprintf("properties: Line %d: %s", p.lex.lineNumber(), format)
+	panic(fmt.Errorf(format, args...))
+}
+
+func (p *parser) expect(expected itemType) (token item) {
+	token = p.lex.nextItem()
+	if token.typ != expected {
+		p.unexpected(token)
+	}
+	return token
+}
+
+func (p *parser) expectOneOf(expected ...itemType) (token item) {
+	token = p.lex.nextItem()
+	for _, v := range expected {
+		if token.typ == v {
+			return token
+		}
+	}
+	p.unexpected(token)
+	panic("unexpected token")
+}
+
+func (p *parser) unexpected(token item) {
+	p.errorf(token.String())
+}
+
+// recover is the handler that turns panics into returns from the top level of Parse.
+func (p *parser) recover(errp *error) {
+	e := recover()
+	if e != nil {
+		if _, ok := e.(runtime.Error); ok {
+			panic(e)
+		}
+		*errp = e.(error)
+	}
+}
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
new file mode 100644
index 0000000000000000000000000000000000000000..85bb18618dfcffe57163c3164694f1148dab8bfe
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -0,0 +1,811 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+// BUG(frank): Set() does not check for invalid unicode literals since this is currently handled by the lexer.
+// BUG(frank): Write() does not allow configuring the newline character. Therefore, on Windows LF is used.
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// ErrorHandlerFunc defines the type of function which handles failures
+// of the MustXXX() functions. An error handler function must exit
+// the application after handling the error.
+type ErrorHandlerFunc func(error)
+
+// ErrorHandler is the function which handles failures of the MustXXX()
+// functions. The default is LogFatalHandler.
+var ErrorHandler ErrorHandlerFunc = LogFatalHandler
+
+// LogHandlerFunc defines the function prototype for logging errors.
+type LogHandlerFunc func(fmt string, args ...interface{})
+
+// LogPrintf defines a log handler which uses log.Printf.
+var LogPrintf LogHandlerFunc = log.Printf
+
+// LogFatalHandler handles the error by logging a fatal error and exiting.
+func LogFatalHandler(err error) {
+	log.Fatal(err)
+}
+
+// PanicHandler handles the error by panicking.
+func PanicHandler(err error) {
+	panic(err)
+}
+
+// -----------------------------------------------------------------------------
+
+// A Properties contains the key/value pairs from the properties input.
+// All values are stored in unexpanded form and are expanded at runtime.
+type Properties struct {
+	// Pre-/Postfix for property expansion.
+	Prefix  string
+	Postfix string
+
+	// DisableExpansion controls the expansion of properties on Get()
+	// and the check for circular references on Set(). When set to
+	// true Properties behaves like a simple key/value store and does
+	// not check for circular references on Get() or on Set().
+	DisableExpansion bool
+
+	// Stores the key/value pairs
+	m map[string]string
+
+	// Stores the comments per key.
+	c map[string][]string
+
+	// Stores the keys in order of appearance.
+	k []string
+}
+
+// NewProperties creates a new Properties struct with the default
+// configuration for "${key}" expressions.
+func NewProperties() *Properties {
+	return &Properties{
+		Prefix:  "${",
+		Postfix: "}",
+		m:       map[string]string{},
+		c:       map[string][]string{},
+		k:       []string{},
+	}
+}
+
+// Get returns the expanded value for the given key if it exists.
+// Otherwise, ok is false.
+func (p *Properties) Get(key string) (value string, ok bool) {
+	v, ok := p.m[key]
+	if p.DisableExpansion {
+		return v, ok
+	}
+	if !ok {
+		return "", false
+	}
+
+	expanded, err := p.expand(v)
+
+	// we guarantee that the expanded value is free of
+	// circular references and malformed expressions
+	// so we panic if we still get an error here.
+	if err != nil {
+		ErrorHandler(fmt.Errorf("%s in %q", err, key+" = "+v))
+	}
+
+	return expanded, true
+}
+
+// MustGet returns the expanded value for the given key if it exists.
+// Otherwise, it panics.
+func (p *Properties) MustGet(key string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	ErrorHandler(invalidKeyError(key))
+	panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// ClearComments removes the comments for all keys.
+func (p *Properties) ClearComments() {
+	p.c = map[string][]string{}
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComment returns the last comment before the given key or an empty string.
+func (p *Properties) GetComment(key string) string {
+	comments, ok := p.c[key]
+	if !ok || len(comments) == 0 {
+		return ""
+	}
+	return comments[len(comments)-1]
+}
+
+// ----------------------------------------------------------------------------
+
+// GetComments returns all comments that appeared before the given key or nil.
+func (p *Properties) GetComments(key string) []string {
+	if comments, ok := p.c[key]; ok {
+		return comments
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComment sets the comment for the key.
+func (p *Properties) SetComment(key, comment string) {
+	p.c[key] = []string{comment}
+}
+
+// ----------------------------------------------------------------------------
+
+// SetComments sets the comments for the key. If the comments are nil then
+// all comments for this key are deleted.
+func (p *Properties) SetComments(key string, comments []string) {
+	if comments == nil {
+		delete(p.c, key)
+		return
+	}
+	p.c[key] = comments
+}
+
+// ----------------------------------------------------------------------------
+
+// GetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the default value is returned.
+func (p *Properties) GetBool(key string, def bool) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetBool checks if the expanded value is one of '1', 'yes',
+// 'true' or 'on' if the key exists. The comparison is case-insensitive.
+// If the key does not exist the function panics.
+func (p *Properties) MustGetBool(key string) bool {
+	v, err := p.getBool(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getBool(key string) (value bool, err error) {
+	if v, ok := p.Get(key); ok {
+		return boolVal(v), nil
+	}
+	return false, invalidKeyError(key)
+}
+
+func boolVal(v string) bool {
+	v = strings.ToLower(v)
+	return v == "1" || v == "true" || v == "yes" || v == "on"
+}
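+
+// For example, boolVal("YES") and boolVal("1") return true, while
+// boolVal("off") and boolVal("") return false.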
+
+// ----------------------------------------------------------------------------
+
+// GetDuration parses the expanded value as a time.Duration (in ns) if the
+// key exists. If key does not exist or the value cannot be parsed the default
+// value is returned. In almost all cases you want to use GetParsedDuration().
+func (p *Properties) GetDuration(key string, def time.Duration) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return time.Duration(v)
+}
+
+// MustGetDuration parses the expanded value as a time.Duration (in ns) if
+// the key exists. If key does not exist or the value cannot be parsed the
+// function panics. In almost all cases you want to use MustGetParsedDuration().
+func (p *Properties) MustGetDuration(key string) time.Duration {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return time.Duration(v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetParsedDuration(key string, def time.Duration) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		return def
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetParsedDuration parses the expanded value with time.ParseDuration() if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetParsedDuration(key string) time.Duration {
+	s, ok := p.Get(key)
+	if !ok {
+		ErrorHandler(invalidKeyError(key))
+	}
+	v, err := time.ParseDuration(s)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+// ----------------------------------------------------------------------------
+
+// GetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetFloat64(key string, def float64) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetFloat64 parses the expanded value as a float64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetFloat64(key string) float64 {
+	v, err := p.getFloat64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getFloat64(key string) (value float64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseFloat(v, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into an int the
+// function panics with an out of range error.
+func (p *Properties) GetInt(key string, def int) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return intRangeCheck(key, v)
+}
+
+// MustGetInt parses the expanded value as an int if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into an int the function panics with
+// an out of range error.
+func (p *Properties) MustGetInt(key string) int {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return intRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetInt64(key string, def int64) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetInt64 parses the expanded value as an int64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetInt64(key string) int64 {
+	v, err := p.getInt64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getInt64(key string) (value int64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseInt(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned. If the value does not fit into a uint the
+// function panics with an out of range error.
+func (p *Properties) GetUint(key string, def uint) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return uintRangeCheck(key, v)
+}
+
+// MustGetUint parses the expanded value as a uint if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+// If the value does not fit into a uint the function panics with
+// an out of range error.
+func (p *Properties) MustGetUint(key string) uint {
+	v, err := p.getUint64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return uintRangeCheck(key, v)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the default
+// value is returned.
+func (p *Properties) GetUint64(key string, def uint64) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		return def
+	}
+	return v
+}
+
+// MustGetUint64 parses the expanded value as a uint64 if the key exists.
+// If key does not exist or the value cannot be parsed the function panics.
+func (p *Properties) MustGetUint64(key string) uint64 {
+	v, err := p.getUint64(key)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return v
+}
+
+func (p *Properties) getUint64(key string) (value uint64, err error) {
+	if v, ok := p.Get(key); ok {
+		value, err = strconv.ParseUint(v, 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		return value, nil
+	}
+	return 0, invalidKeyError(key)
+}
+
+// ----------------------------------------------------------------------------
+
+// GetString returns the expanded value for the given key if it exists or
+// the default value otherwise.
+func (p *Properties) GetString(key, def string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	return def
+}
+
+// MustGetString returns the expanded value for the given key if it exists or
+// panics otherwise.
+func (p *Properties) MustGetString(key string) string {
+	if v, ok := p.Get(key); ok {
+		return v
+	}
+	ErrorHandler(invalidKeyError(key))
+	panic("ErrorHandler should exit")
+}
+
+// ----------------------------------------------------------------------------
+
+// Filter returns a new properties object which contains all properties
+// for which the key matches the pattern.
+func (p *Properties) Filter(pattern string) (*Properties, error) {
+	re, err := regexp.Compile(pattern)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.FilterRegexp(re), nil
+}
+
+// FilterRegexp returns a new properties object which contains all properties
+// for which the key matches the regular expression.
+func (p *Properties) FilterRegexp(re *regexp.Regexp) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if re.MatchString(k) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterPrefix returns a new properties object with a subset of all keys
+// with the given prefix.
+func (p *Properties) FilterPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	for _, k := range p.k {
+		if strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are just copying a subset of keys this cannot happen (fingers crossed)
+			pp.Set(k, p.m[k])
+		}
+	}
+	return pp
+}
+
+// FilterStripPrefix returns a new properties object with a subset of all keys
+// with the given prefix and the prefix removed from the keys.
+func (p *Properties) FilterStripPrefix(prefix string) *Properties {
+	pp := NewProperties()
+	n := len(prefix)
+	for _, k := range p.k {
+		if len(k) > len(prefix) && strings.HasPrefix(k, prefix) {
+			// TODO(fs): we are ignoring the error which flags a circular reference.
+			// TODO(fs): since we are modifying keys I am not entirely sure whether we can create a circular reference
+			// TODO(fs): this function should probably return an error but the signature is fixed
+			pp.Set(k[n:], p.m[k])
+		}
+	}
+	return pp
+}
+
+// Len returns the number of keys.
+func (p *Properties) Len() int {
+	return len(p.m)
+}
+
+// Keys returns all keys in the same order as in the input.
+func (p *Properties) Keys() []string {
+	keys := make([]string, len(p.k))
+	copy(keys, p.k)
+	return keys
+}
+
+// Set sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. If the value contains a
+// circular reference or a malformed expression then
+// an error is returned.
+// An empty key is silently ignored.
+func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
+	if key == "" {
+		return "", false, nil
+	}
+
+	// if expansion is disabled we allow circular references
+	if p.DisableExpansion {
+		prev, ok = p.Get(key)
+		p.m[key] = value
+		if !ok {
+			p.k = append(p.k, key)
+		}
+		return prev, ok, nil
+	}
+
+	// to check for a circular reference we temporarily need
+	// to set the new value. If there is an error then revert
+	// to the previous state. Only if all tests are successful
+	// then we add the key to the p.k list.
+	prev, ok = p.Get(key)
+	p.m[key] = value
+
+	// now check for a circular reference
+	_, err = p.expand(value)
+	if err != nil {
+
+		// revert to the previous state
+		if ok {
+			p.m[key] = prev
+		} else {
+			delete(p.m, key)
+		}
+
+		return "", false, err
+	}
+
+	if !ok {
+		p.k = append(p.k, key)
+	}
+
+	return prev, ok, nil
+}
+
+// SetValue sets property key to the default string value
+// as defined by fmt.Sprintf("%v").
+func (p *Properties) SetValue(key string, value interface{}) error {
+	_, _, err := p.Set(key, fmt.Sprintf("%v", value))
+	return err
+}
+
+// MustSet sets the property key to the corresponding value.
+// If a value for key existed before then ok is true and prev
+// contains the previous value. An empty key is silently ignored.
+func (p *Properties) MustSet(key, value string) (prev string, ok bool) {
+	prev, ok, err := p.Set(key, value)
+	if err != nil {
+		ErrorHandler(err)
+	}
+	return prev, ok
+}
+
+// String returns a string of all expanded 'key = value' pairs.
+func (p *Properties) String() string {
+	var s string
+	for _, key := range p.k {
+		value, _ := p.Get(key)
+		s = fmt.Sprintf("%s%s = %s\n", s, key, value)
+	}
+	return s
+}
+
+// Write writes all unexpanded 'key = value' pairs to the given writer.
+// Write returns the number of bytes written and any write error encountered.
+func (p *Properties) Write(w io.Writer, enc Encoding) (n int, err error) {
+	return p.WriteComment(w, "", enc)
+}
+
+// WriteComment writes all unexpanded 'key = value' pairs to the given writer.
+// If prefix is not empty then comments are written with a blank line and the
+// given prefix. The prefix should be either "# " or "! " to be compatible with
+// the properties file format. Otherwise, the properties parser will not be
+// able to read the file back in. It returns the number of bytes written and
+// any write error encountered.
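+//
+// A minimal usage sketch (writer, prefix and encoding chosen for illustration):
+//
+//	n, err := p.WriteComment(os.Stdout, "# ", UTF8)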
+func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n int, err error) {
+	var x int
+
+	for _, key := range p.k {
+		value := p.m[key]
+
+		if prefix != "" {
+			if comments, ok := p.c[key]; ok {
+				// don't print comments if they are all empty
+				allEmpty := true
+				for _, c := range comments {
+					if c != "" {
+						allEmpty = false
+						break
+					}
+				}
+
+				if !allEmpty {
+					// add a blank line between entries but not at the top
+					if len(comments) > 0 && n > 0 {
+						x, err = fmt.Fprintln(w)
+						if err != nil {
+							return
+						}
+						n += x
+					}
+
+					for _, c := range comments {
+						x, err = fmt.Fprintf(w, "%s%s\n", prefix, encode(c, "", enc))
+						if err != nil {
+							return
+						}
+						n += x
+					}
+				}
+			}
+		}
+
+		x, err = fmt.Fprintf(w, "%s = %s\n", encode(key, " :", enc), encode(value, "", enc))
+		if err != nil {
+			return
+		}
+		n += x
+	}
+	return
+}
+
+// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string {
+	m := make(map[string]string)
+	for k, v := range p.m {
+		m[k] = v
+	}
+	return m
+}
+
+// FilterFunc returns a copy of the properties which includes the values which passed all filters.
+func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
+	pp := NewProperties()
+outer:
+	for k, v := range p.m {
+		for _, f := range filters {
+			if !f(k, v) {
+				continue outer
+			}
+		}
+		// add the pair only after every filter accepted it
+		pp.Set(k, v)
+	}
+	return pp
+}
+
+// ----------------------------------------------------------------------------
+
+// Delete removes the key and its comments.
+func (p *Properties) Delete(key string) {
+	delete(p.m, key)
+	delete(p.c, key)
+	newKeys := []string{}
+	for _, k := range p.k {
+		if k != key {
+			newKeys = append(newKeys, k)
+		}
+	}
+	p.k = newKeys
+}
+
+// Merge merges properties, comments and keys from other *Properties into p
+func (p *Properties) Merge(other *Properties) {
+	for k, v := range other.m {
+		p.m[k] = v
+	}
+	for k, v := range other.c {
+		p.c[k] = v
+	}
+
+outer:
+	for _, otherKey := range other.k {
+		for _, key := range p.k {
+			if otherKey == key {
+				continue outer
+			}
+		}
+		p.k = append(p.k, otherKey)
+	}
+}
+
+// ----------------------------------------------------------------------------
+
+// check expands all values and returns an error if a circular reference or
+// a malformed expression was found.
+func (p *Properties) check() error {
+	for _, value := range p.m {
+		if _, err := p.expand(value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *Properties) expand(input string) (string, error) {
+	// no pre/postfix -> nothing to expand
+	if p.Prefix == "" && p.Postfix == "" {
+		return input, nil
+	}
+
+	return expand(input, make(map[string]bool), p.Prefix, p.Postfix, p.m)
+}
+
+// expand recursively expands expressions of '(prefix)key(postfix)' to their corresponding values.
+// The function keeps track of the keys that were already expanded and stops if it
+// detects a circular reference or a malformed expression of the form '(prefix)key'.
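+//
+// For example, with prefix "${" and postfix "}" and values["name"] == "World",
+// the input "Hello, ${name}!" expands to "Hello, World!". A self-referencing
+// value such as a = "${a}" is reported as a circular reference.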
+func expand(s string, keys map[string]bool, prefix, postfix string, values map[string]string) (string, error) {
+	start := strings.Index(s, prefix)
+	if start == -1 {
+		return s, nil
+	}
+
+	keyStart := start + len(prefix)
+	keyLen := strings.Index(s[keyStart:], postfix)
+	if keyLen == -1 {
+		return "", fmt.Errorf("malformed expression")
+	}
+
+	end := keyStart + keyLen + len(postfix) - 1
+	key := s[keyStart : keyStart+keyLen]
+
+	// fmt.Printf("s:%q pp:%q start:%d end:%d keyStart:%d keyLen:%d key:%q\n", s, prefix + "..." + postfix, start, end, keyStart, keyLen, key)
+
+	if _, ok := keys[key]; ok {
+		return "", fmt.Errorf("circular reference")
+	}
+
+	val, ok := values[key]
+	if !ok {
+		val = os.Getenv(key)
+	}
+
+	// remember that we've seen the key
+	keys[key] = true
+
+	return expand(s[:start]+val+s[end+1:], keys, prefix, postfix, values)
+}
+
+// encode encodes a UTF-8 string to ISO-8859-1 and escapes some characters.
+func encode(s string, special string, enc Encoding) string {
+	switch enc {
+	case UTF8:
+		return encodeUtf8(s, special)
+	case ISO_8859_1:
+		return encodeIso(s, special)
+	default:
+		panic(fmt.Sprintf("unsupported encoding %v", enc))
+	}
+}
+
+func encodeUtf8(s string, special string) string {
+	v := ""
+	for pos := 0; pos < len(s); {
+		r, w := utf8.DecodeRuneInString(s[pos:])
+		pos += w
+		v += escape(r, special)
+	}
+	return v
+}
+
+func encodeIso(s string, special string) string {
+	var r rune
+	var w int
+	var v string
+	for pos := 0; pos < len(s); {
+		switch r, w = utf8.DecodeRuneInString(s[pos:]); {
+		case r < 1<<8: // single byte rune -> escape special chars only
+			v += escape(r, special)
+		case r < 1<<16: // two byte rune -> unicode literal
+			v += fmt.Sprintf("\\u%04x", r)
+		default: // more than two bytes per rune -> can't encode
+			v += "?"
+		}
+		pos += w
+	}
+	return v
+}
+
+func escape(r rune, special string) string {
+	switch r {
+	case '\f':
+		return "\\f"
+	case '\n':
+		return "\\n"
+	case '\r':
+		return "\\r"
+	case '\t':
+		return "\\t"
+	default:
+		if strings.ContainsRune(special, r) {
+			return "\\" + string(r)
+		}
+		return string(r)
+	}
+}
+
+func invalidKeyError(key string) error {
+	return fmt.Errorf("unknown property: %s", key)
+}
diff --git a/vendor/github.com/magiconair/properties/rangecheck.go b/vendor/github.com/magiconair/properties/rangecheck.go
new file mode 100644
index 0000000000000000000000000000000000000000..2e907d540b9dc178aee1077570f01a5bc9a6b93e
--- /dev/null
+++ b/vendor/github.com/magiconair/properties/rangecheck.go
@@ -0,0 +1,31 @@
+// Copyright 2017 Frank Schroeder. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package properties
+
+import (
+	"fmt"
+	"math"
+)
+
+// make this a var to overwrite it in a test
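+// (^uint(0) has all bits set, so it equals math.MaxUint32 exactly when uint is 32 bits wide)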
+var is32Bit = ^uint(0) == math.MaxUint32
+
+// intRangeCheck checks if the value fits into the int type and
+// panics if it does not.
+func intRangeCheck(key string, v int64) int {
+	if is32Bit && (v < math.MinInt32 || v > math.MaxInt32) {
+		panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+	}
+	return int(v)
+}
+
+// uintRangeCheck checks if the value fits into the uint type and
+// panics if it does not.
+func uintRangeCheck(key string, v uint64) uint {
+	if is32Bit && v > math.MaxUint32 {
+		panic(fmt.Sprintf("Value %d for key %s out of range", v, key))
+	}
+	return uint(v)
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f9c841a51e0d11ec20c19ff7600e88da826867fa
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..659d6885fc7ecb005e194d56c75fd08a5808945b
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/README.md
@@ -0,0 +1,46 @@
+# mapstructure
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
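+
+As a quick, illustrative sketch (the `Person` struct and map contents below
+are made up for this example), that flow looks like:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+type Person struct {
+	Name string
+	Age  int
+}
+
+func main() {
+	// Generic data, e.g. read from some stream.
+	input := map[string]interface{}{"name": "Mitchell", "age": 91}
+
+	var p Person
+	if err := mapstructure.Decode(input, &p); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", p) // {Name:Mitchell Age:91}
+}
+```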
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+  "type": "person",
+  "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
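+
+A sketch of that two-phase approach (the `Person` struct here is illustrative,
+not part of the library):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/mitchellh/mapstructure"
+)
+
+type Person struct {
+	Name string
+}
+
+func main() {
+	doc := []byte(`{"type": "person", "name": "Mitchell"}`)
+
+	// Pass 1: decode into a generic map and inspect the "type" key.
+	var raw map[string]interface{}
+	if err := json.Unmarshal(doc, &raw); err != nil {
+		panic(err)
+	}
+
+	// Pass 2: decode the same map into the structure that "type" selects.
+	if raw["type"] == "person" {
+		var p Person
+		if err := mapstructure.Decode(raw, &p); err != nil {
+			panic(err)
+		}
+		fmt.Println(p.Name) // Mitchell
+	}
+}
+```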
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644
index 0000000000000000000000000000000000000000..afcfd5eed69c0111683c08f976068f1793dac60e
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -0,0 +1,152 @@
+package mapstructure
+
+import (
+	"errors"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+	// Create variables here so we can reference them with the reflect pkg
+	var f1 DecodeHookFuncType
+	var f2 DecodeHookFuncKind
+
+	// Fill in the variables into this interface and the rest is done
+	// automatically using the reflect package.
+	potential := []interface{}{f1, f2}
+
+	v := reflect.ValueOf(h)
+	vt := v.Type()
+	for _, raw := range potential {
+		pt := reflect.ValueOf(raw).Type()
+		if vt.ConvertibleTo(pt) {
+			return v.Convert(pt).Interface()
+		}
+	}
+
+	return nil
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
+// that took reflect.Kind instead of reflect.Type.
+func DecodeHookExec(
+	raw DecodeHookFunc,
+	from reflect.Type, to reflect.Type,
+	data interface{}) (interface{}, error) {
+	switch f := typedDecodeHook(raw).(type) {
+	case DecodeHookFuncType:
+		return f(from, to, data)
+	case DecodeHookFuncKind:
+		return f(from.Kind(), to.Kind(), data)
+	default:
+		return nil, errors.New("invalid decode hook signature")
+	}
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		var err error
+		for _, f1 := range fs {
+			data, err = DecodeHookExec(f1, f, t, data)
+			if err != nil {
+				return nil, err
+			}
+
+			// Modify the from kind to be correct with the new data
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
+		}
+
+		return data, nil
+	}
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
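+// For example, StringToSliceHookFunc(",") converts "a,b,c" into
+// []string{"a", "b", "c"} when the target field is a slice.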
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+	return func(
+		f reflect.Kind,
+		t reflect.Kind,
+		data interface{}) (interface{}, error) {
+		if f != reflect.String || t != reflect.Slice {
+			return data, nil
+		}
+
+		raw := data.(string)
+		if raw == "" {
+			return []string{}, nil
+		}
+
+		return strings.Split(raw, sep), nil
+	}
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Duration(5)) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.ParseDuration(data.(string))
+	}
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
+func WeaklyTypedHook(
+	f reflect.Kind,
+	t reflect.Kind,
+	data interface{}) (interface{}, error) {
+	dataVal := reflect.ValueOf(data)
+	switch t {
+	case reflect.String:
+		switch f {
+		case reflect.Bool:
+			if dataVal.Bool() {
+				return "1", nil
+			}
+			return "0", nil
+		case reflect.Float32:
+			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+		case reflect.Int:
+			return strconv.FormatInt(dataVal.Int(), 10), nil
+		case reflect.Slice:
+			dataType := dataVal.Type()
+			elemKind := dataType.Elem().Kind()
+			if elemKind == reflect.Uint8 {
+				return string(dataVal.Interface().([]uint8)), nil
+			}
+		case reflect.Uint:
+			return strconv.FormatUint(dataVal.Uint(), 10), nil
+		}
+	}
+
+	return data, nil
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644
index 0000000000000000000000000000000000000000..47a99e5af3f1b700db374eca24b48d9d8fc21647
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/error.go
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+	Errors []string
+}
+
+func (e *Error) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	sort.Strings(points)
+	return fmt.Sprintf(
+		"%d error(s) decoding:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+	if e == nil {
+		return nil
+	}
+
+	result := make([]error, len(e.Errors))
+	for i, e := range e.Errors {
+		result[i] = errors.New(e)
+	}
+
+	return result
+}
+
+func appendErrors(errors []string, err error) []string {
+	switch e := err.(type) {
+	case *Error:
+		return append(errors, e.Errors...)
+	default:
+		return append(errors, e.Error())
+	}
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644
index 0000000000000000000000000000000000000000..30a9957c65dbf66a80abc1cec2258ae5a08d4cec
--- /dev/null
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -0,0 +1,834 @@
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+	// DecodeHook, if set, will be called before any decoding and any
+	// type conversion (if WeaklyTypedInput is on). This lets you modify
+	// the values before they're set down onto the resulting struct.
+	//
+	// If an error is returned, the entire decode will fail with that
+	// error.
+	DecodeHook DecodeHookFunc
+
+	// If ErrorUnused is true, then it is an error for there to exist
+	// keys in the original map that were unused in the decoding process
+	// (extra keys).
+	ErrorUnused bool
+
+	// ZeroFields, if set to true, will zero fields before writing them.
+	// For example, a map will be emptied before decoded values are put in
+	// it. If this is false, a map will be merged.
+	ZeroFields bool
+
+	// If WeaklyTypedInput is true, the decoder will make the following
+	// "weak" conversions:
+	//
+	//   - bools to string (true = "1", false = "0")
+	//   - numbers to string (base 10)
+	//   - bools to int/uint (true = 1, false = 0)
+	//   - strings to int/uint (base implied by prefix)
+	//   - int to bool (true if value != 0)
+	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+	//     FALSE, false, False. Anything else is an error)
+	//   - empty array = empty map and vice versa
+	//   - negative numbers to overflowed uint values (base 10)
+	//   - slice of maps to a merged map
+	//   - single values are converted to slices if required. Each
+	//     element is weakly decoded. For example: "4" can become []int{4}
+	//     if the target type is an int slice.
+	//
+	WeaklyTypedInput bool
+
+	// Metadata is the struct that will contain extra metadata about
+	// the decoding. If this is nil, then no metadata will be tracked.
+	Metadata *Metadata
+
+	// Result is a pointer to the struct that will contain the decoded
+	// value.
+	Result interface{}
+
+	// The tag name that mapstructure reads for field names. This
+	// defaults to "mapstructure"
+	TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+	config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+	// Keys are the keys of the structure which were successfully decoded
+	Keys []string
+
+	// Unused is a slice of keys that were found in the raw value but
+	// weren't decoded since there was no matching field in the result interface
+	Unused []string
+}
+
+// Decode takes a map and uses reflection to convert it into the
+// given Go native structure. rawVal must be a pointer to a struct.
+func Decode(m interface{}, rawVal interface{}) error {
+	config := &DecoderConfig{
+		Metadata: nil,
+		Result:   rawVal,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(m)
+}
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+	config := &DecoderConfig{
+		Metadata:         nil,
+		Result:           output,
+		WeaklyTypedInput: true,
+	}
+
+	decoder, err := NewDecoder(config)
+	if err != nil {
+		return err
+	}
+
+	return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+	val := reflect.ValueOf(config.Result)
+	if val.Kind() != reflect.Ptr {
+		return nil, errors.New("result must be a pointer")
+	}
+
+	val = val.Elem()
+	if !val.CanAddr() {
+		return nil, errors.New("result must be addressable (a pointer)")
+	}
+
+	if config.Metadata != nil {
+		if config.Metadata.Keys == nil {
+			config.Metadata.Keys = make([]string, 0)
+		}
+
+		if config.Metadata.Unused == nil {
+			config.Metadata.Unused = make([]string, 0)
+		}
+	}
+
+	if config.TagName == "" {
+		config.TagName = "mapstructure"
+	}
+
+	result := &Decoder{
+		config: config,
+	}
+
+	return result, nil
+}
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(raw interface{}) error {
+	return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
+	if data == nil {
+		// If the data is nil, then we don't set anything.
+		return nil
+	}
+
+	dataVal := reflect.ValueOf(data)
+	if !dataVal.IsValid() {
+		// If the data value is invalid, then we just set the value
+		// to be the zero value.
+		val.Set(reflect.Zero(val.Type()))
+		return nil
+	}
+
+	if d.config.DecodeHook != nil {
+		// We have a DecodeHook, so let's pre-process the data.
+		var err error
+		data, err = DecodeHookExec(
+			d.config.DecodeHook,
+			dataVal.Type(), val.Type(), data)
+		if err != nil {
+			return fmt.Errorf("error decoding '%s': %s", name, err)
+		}
+	}
+
+	var err error
+	dataKind := getKind(val)
+	switch dataKind {
+	case reflect.Bool:
+		err = d.decodeBool(name, data, val)
+	case reflect.Interface:
+		err = d.decodeBasic(name, data, val)
+	case reflect.String:
+		err = d.decodeString(name, data, val)
+	case reflect.Int:
+		err = d.decodeInt(name, data, val)
+	case reflect.Uint:
+		err = d.decodeUint(name, data, val)
+	case reflect.Float32:
+		err = d.decodeFloat(name, data, val)
+	case reflect.Struct:
+		err = d.decodeStruct(name, data, val)
+	case reflect.Map:
+		err = d.decodeMap(name, data, val)
+	case reflect.Ptr:
+		err = d.decodePtr(name, data, val)
+	case reflect.Slice:
+		err = d.decodeSlice(name, data, val)
+	case reflect.Func:
+		err = d.decodeFunc(name, data, val)
+	default:
+		// If we reached this point then we weren't able to decode it
+		return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
+	}
+
+	// If we reached here, then we successfully decoded SOMETHING, so
+	// mark the key as used if we're tracking metadata.
+	if d.config.Metadata != nil && name != "" {
+		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+	}
+
+	return err
+}
+
+// This decodes a basic type (bool, int, string, etc.) and sets the
+// value to "data" of that type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	if !dataVal.IsValid() {
+		dataVal = reflect.Zero(val.Type())
+	}
+
+	dataValType := dataVal.Type()
+	if !dataValType.AssignableTo(val.Type()) {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got '%s'",
+			name, val.Type(), dataValType)
+	}
+
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	converted := true
+	switch {
+	case dataKind == reflect.String:
+		val.SetString(dataVal.String())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetString("1")
+		} else {
+			val.SetString("0")
+		}
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+	case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		switch {
+		case elemKind == reflect.Uint8:
+			val.SetString(string(dataVal.Interface().([]uint8)))
+		default:
+			converted = false
+		}
+	default:
+		converted = false
+	}
+
+	if !converted {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetInt(dataVal.Int())
+	case dataKind == reflect.Uint:
+		val.SetInt(int64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetInt(int64(dataVal.Float()))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetInt(1)
+		} else {
+			val.SetInt(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetInt(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Int64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetInt(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Int:
+		i := dataVal.Int()
+		if i < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %d overflows uint",
+				name, i)
+		}
+		val.SetUint(uint64(i))
+	case dataKind == reflect.Uint:
+		val.SetUint(dataVal.Uint())
+	case dataKind == reflect.Float32:
+		f := dataVal.Float()
+		if f < 0 && !d.config.WeaklyTypedInput {
+			return fmt.Errorf("cannot parse '%s', %f overflows uint",
+				name, f)
+		}
+		val.SetUint(uint64(f))
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetUint(1)
+		} else {
+			val.SetUint(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+		if err == nil {
+			val.SetUint(i)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+
+	switch {
+	case dataKind == reflect.Bool:
+		val.SetBool(dataVal.Bool())
+	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Int() != 0)
+	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Uint() != 0)
+	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+		val.SetBool(dataVal.Float() != 0)
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		b, err := strconv.ParseBool(dataVal.String())
+		if err == nil {
+			val.SetBool(b)
+		} else if dataVal.String() == "" {
+			val.SetBool(false)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+		}
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.ValueOf(data)
+	dataKind := getKind(dataVal)
+	dataType := dataVal.Type()
+
+	switch {
+	case dataKind == reflect.Int:
+		val.SetFloat(float64(dataVal.Int()))
+	case dataKind == reflect.Uint:
+		val.SetFloat(float64(dataVal.Uint()))
+	case dataKind == reflect.Float32:
+		val.SetFloat(dataVal.Float())
+	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+		if dataVal.Bool() {
+			val.SetFloat(1)
+		} else {
+			val.SetFloat(0)
+		}
+	case dataKind == reflect.String && d.config.WeaklyTypedInput:
+		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+		if err == nil {
+			val.SetFloat(f)
+		} else {
+			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+		}
+	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+		jn := data.(json.Number)
+		i, err := jn.Float64()
+		if err != nil {
+			return fmt.Errorf(
+				"error decoding json.Number into %s: %s", name, err)
+		}
+		val.SetFloat(i)
+	default:
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+	valType := val.Type()
+	valKeyType := valType.Key()
+	valElemType := valType.Elem()
+
+	// By default we overwrite keys in the current map
+	valMap := val
+
+	// If the map is nil or we're purposely zeroing fields, make a new map
+	if valMap.IsNil() || d.config.ZeroFields {
+		// Make a new map to hold our result
+		mapType := reflect.MapOf(valKeyType, valElemType)
+		valMap = reflect.MakeMap(mapType)
+	}
+
+	// Check input type
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if dataVal.Kind() != reflect.Map {
+		// In weak mode, we accept a slice of maps as an input...
+		if d.config.WeaklyTypedInput {
+			switch dataVal.Kind() {
+			case reflect.Array, reflect.Slice:
+				// Special case for BC reasons (covered by tests)
+				if dataVal.Len() == 0 {
+					val.Set(valMap)
+					return nil
+				}
+
+				for i := 0; i < dataVal.Len(); i++ {
+					err := d.decode(
+						fmt.Sprintf("%s[%d]", name, i),
+						dataVal.Index(i).Interface(), val)
+					if err != nil {
+						return err
+					}
+				}
+
+				return nil
+			}
+		}
+
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+	}
+
+	// Accumulate errors
+	errors := make([]string, 0)
+
+	for _, k := range dataVal.MapKeys() {
+		fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+		// First decode the key into the proper type
+		currentKey := reflect.Indirect(reflect.New(valKeyType))
+		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		// Next decode the data into the proper type
+		v := dataVal.MapIndex(k).Interface()
+		currentVal := reflect.Indirect(reflect.New(valElemType))
+		if err := d.decode(fieldName, v, currentVal); err != nil {
+			errors = appendErrors(errors, err)
+			continue
+		}
+
+		valMap.SetMapIndex(currentKey, currentVal)
+	}
+
+	// Set the built up map to the value
+	val.Set(valMap)
+
+	// If we had errors, return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+	// Create an element of the concrete (non pointer) type and decode
+	// into that. Then set the value of the pointer to this type.
+	valType := val.Type()
+	valElemType := valType.Elem()
+
+	realVal := val
+	if realVal.IsNil() || d.config.ZeroFields {
+		realVal = reflect.New(valElemType)
+	}
+
+	if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+		return err
+	}
+
+	val.Set(realVal)
+	return nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+	// Functions cannot be constructed from data; the only supported case
+	// is assigning a function value whose type matches the target exactly.
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	if val.Type() != dataVal.Type() {
+		return fmt.Errorf(
+			"'%s' expected type '%s', got unconvertible type '%s'",
+			name, val.Type(), dataVal.Type())
+	}
+	val.Set(dataVal)
+	return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+	dataValKind := dataVal.Kind()
+	valType := val.Type()
+	valElemType := valType.Elem()
+	sliceType := reflect.SliceOf(valElemType)
+
+	valSlice := val
+	if valSlice.IsNil() || d.config.ZeroFields {
+		// Check input type
+		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+			if d.config.WeaklyTypedInput {
+				switch {
+				// Empty maps turn into empty slices
+				case dataValKind == reflect.Map:
+					if dataVal.Len() == 0 {
+						val.Set(reflect.MakeSlice(sliceType, 0, 0))
+						return nil
+					}
+
+				// All other types we try to convert to the slice type
+				// and "lift" it into it. i.e. a string becomes a string slice.
+				default:
+					// Just re-try this function with data as a slice.
+					return d.decodeSlice(name, []interface{}{data}, val)
+				}
+			}
+
+			return fmt.Errorf(
+				"'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+		}
+
+		// Make a new slice to hold our result, same size as the original data.
+		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+	}
+
+	// Accumulate any errors
+	errors := make([]string, 0)
+
+	for i := 0; i < dataVal.Len(); i++ {
+		currentData := dataVal.Index(i).Interface()
+		for valSlice.Len() <= i {
+			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+		}
+		currentField := valSlice.Index(i)
+
+		fieldName := fmt.Sprintf("%s[%d]", name, i)
+		if err := d.decode(fieldName, currentData, currentField); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	// Finally, set the value to the slice we built up
+	val.Set(valSlice)
+
+	// If there were errors, we return those
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+	dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+	// If the type of the value to write to and the data match directly,
+	// then we just set it directly instead of recursing into the structure.
+	if dataVal.Type() == val.Type() {
+		val.Set(dataVal)
+		return nil
+	}
+
+	dataValKind := dataVal.Kind()
+	if dataValKind != reflect.Map {
+		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
+	}
+
+	dataValType := dataVal.Type()
+	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+		return fmt.Errorf(
+			"'%s' needs a map with string keys, has '%s' keys",
+			name, dataValType.Key().Kind())
+	}
+
+	dataValKeys := make(map[reflect.Value]struct{})
+	dataValKeysUnused := make(map[interface{}]struct{})
+	for _, dataValKey := range dataVal.MapKeys() {
+		dataValKeys[dataValKey] = struct{}{}
+		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+	}
+
+	errors := make([]string, 0)
+
+	// This slice will keep track of all the structs we'll be decoding.
+	// There can be more than one struct if there are embedded structs
+	// that are squashed.
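+	// For example, an embedded struct field tagged `mapstructure:",squash"`
+	// has its fields decoded as if they were declared on the outer struct.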
+	structs := make([]reflect.Value, 1, 5)
+	structs[0] = val
+
+	// Compile the list of all the fields that we're going to be decoding
+	// from all the structs.
+	type field struct {
+		field reflect.StructField
+		val   reflect.Value
+	}
+	fields := []field{}
+	for len(structs) > 0 {
+		structVal := structs[0]
+		structs = structs[1:]
+
+		structType := structVal.Type()
+
+		for i := 0; i < structType.NumField(); i++ {
+			fieldType := structType.Field(i)
+			fieldKind := fieldType.Type.Kind()
+
+			// If "squash" is specified in the tag, we squash the field down.
+			squash := false
+			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+			for _, tag := range tagParts[1:] {
+				if tag == "squash" {
+					squash = true
+					break
+				}
+			}
+
+			if squash {
+				if fieldKind != reflect.Struct {
+					errors = appendErrors(errors,
+						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+				} else {
+					structs = append(structs, val.FieldByName(fieldType.Name))
+				}
+				continue
+			}
+
+			// Normal struct field, store it away
+			fields = append(fields, field{fieldType, structVal.Field(i)})
+		}
+	}
+
+	// for fieldType, field := range fields {
+	for _, f := range fields {
+		field, fieldValue := f.field, f.val
+		fieldName := field.Name
+
+		tagValue := field.Tag.Get(d.config.TagName)
+		tagValue = strings.SplitN(tagValue, ",", 2)[0]
+		if tagValue != "" {
+			fieldName = tagValue
+		}
+
+		rawMapKey := reflect.ValueOf(fieldName)
+		rawMapVal := dataVal.MapIndex(rawMapKey)
+		if !rawMapVal.IsValid() {
+			// Do a slower search by iterating over each key and
+			// doing case-insensitive search.
+			for dataValKey := range dataValKeys {
+				mK, ok := dataValKey.Interface().(string)
+				if !ok {
+					// Not a string key
+					continue
+				}
+
+				if strings.EqualFold(mK, fieldName) {
+					rawMapKey = dataValKey
+					rawMapVal = dataVal.MapIndex(dataValKey)
+					break
+				}
+			}
+
+			if !rawMapVal.IsValid() {
+				// There was no matching key in the map for the value in
+				// the struct. Just ignore.
+				continue
+			}
+		}
+
+		// Delete the key we're using from the unused map so we stop tracking
+		delete(dataValKeysUnused, rawMapKey.Interface())
+
+		if !fieldValue.IsValid() {
+			// This should never happen
+			panic("field is not valid")
+		}
+
+		// If we can't set the field, then it is unexported or something,
+		// and we just continue onwards.
+		if !fieldValue.CanSet() {
+			continue
+		}
+
+		// If the name is empty string, then we're at the root, and we
+		// don't dot-join the fields.
+		if name != "" {
+			fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+		}
+
+		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
+			errors = appendErrors(errors, err)
+		}
+	}
+
+	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+		keys := make([]string, 0, len(dataValKeysUnused))
+		for rawKey := range dataValKeysUnused {
+			keys = append(keys, rawKey.(string))
+		}
+		sort.Strings(keys)
+
+		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+		errors = appendErrors(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return &Error{errors}
+	}
+
+	// Add the unused keys to the list of unused keys if we're tracking metadata
+	if d.config.Metadata != nil {
+		for rawKey := range dataValKeysUnused {
+			key := rawKey.(string)
+			if name != "" {
+				key = fmt.Sprintf("%s.%s", name, key)
+			}
+
+			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+		}
+	}
+
+	return nil
+}
+
+func getKind(val reflect.Value) reflect.Kind {
+	kind := val.Kind()
+
+	switch {
+	case kind >= reflect.Int && kind <= reflect.Int64:
+		return reflect.Int
+	case kind >= reflect.Uint && kind <= reflect.Uint64:
+		return reflect.Uint
+	case kind >= reflect.Float32 && kind <= reflect.Float64:
+		return reflect.Float32
+	default:
+		return kind
+	}
+}
diff --git a/vendor/github.com/pelletier/go-toml/LICENSE b/vendor/github.com/pelletier/go-toml/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..583bdae628238f630723b41fc6e39d8d959de57f
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 - 2017 Thomas Pelletier, Eric Anderton
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0d357acf35dbbecb8a8fd35bf5b4ac6daa10c08d
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -0,0 +1,131 @@
+# go-toml
+
+Go library for the [TOML](https://github.com/mojombo/toml) format.
+
+This library supports TOML version
+[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+
+[![GoDoc](https://godoc.org/github.com/pelletier/go-toml?status.svg)](http://godoc.org/github.com/pelletier/go-toml)
+[![license](https://img.shields.io/github/license/pelletier/go-toml.svg)](https://github.com/pelletier/go-toml/blob/master/LICENSE)
+[![Build Status](https://travis-ci.org/pelletier/go-toml.svg?branch=master)](https://travis-ci.org/pelletier/go-toml)
+[![Coverage Status](https://coveralls.io/repos/github/pelletier/go-toml/badge.svg?branch=master)](https://coveralls.io/github/pelletier/go-toml?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pelletier/go-toml)](https://goreportcard.com/report/github.com/pelletier/go-toml)
+
+## Features
+
+Go-toml provides the following features for using data parsed from TOML documents:
+
+* Load TOML documents from files and string data
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
+* Line & column position data for all parsed elements
+* [Query support similar to JSON-Path](query/)
+* Syntax errors contain line and column numbers
+
+## Import
+
+```go
+import "github.com/pelletier/go-toml"
+```
+
+## Usage example
+
+Read a TOML document:
+
+```go
+config, _ := toml.Load(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
+
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password := postgresConfig.Get("password").(string)
+```
+
+Or use Unmarshal:
+
+```go
+type Postgres struct {
+    User     string
+    Password string
+}
+type Config struct {
+    Postgres Postgres
+}
+
+doc := []byte(`
+[Postgres]
+User = "pelletier"
+Password = "mypassword"`)
+
+config := Config{}
+toml.Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
+```
+
+Or use a query:
+
+```go
+// use a query to gather elements without walking the tree
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
+for ii, item := range results.Values() {
+    fmt.Println("Query result %d: %v", ii, item)
+}
+```
+
+## Documentation
+
+The documentation and additional examples are available at
+[godoc.org](http://godoc.org/github.com/pelletier/go-toml).
+
+## Tools
+
+Go-toml provides two handy command line tools:
+
+* `tomll`: Reads TOML files and lints them.
+
+    ```
+    go install github.com/pelletier/go-toml/cmd/tomll
+    tomll --help
+    ```
+* `tomljson`: Reads a TOML file and outputs its JSON representation.
+
+    ```
+    go install github.com/pelletier/go-toml/cmd/tomljson
+    tomljson --help
+    ```
+
+## Contribute
+
+Feel free to report bugs and submit patches using GitHub's pull request system on
+[pelletier/go-toml](https://github.com/pelletier/go-toml). Any feedback would be
+much appreciated!
+
+### Run tests
+
+You have to make sure two kinds of tests run:
+
+1. The Go unit tests
+2. The TOML examples base
+
+You can run both of them using `./test.sh`.
+
+### Fuzzing
+
+The script `./fuzz.sh` is available to
+run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml.
+
+## Versioning
+
+Go-toml follows [Semantic Versioning](http://semver.org/). The supported version
+of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of
+this document. The last two major versions of Go are supported
+(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)).
+
+## License
+
+The MIT License (MIT). Read [LICENSE](LICENSE).
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json
new file mode 100644
index 0000000000000000000000000000000000000000..86f99c6a87766c1ef05e0ee4869e25750ec1d436
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.json
@@ -0,0 +1,164 @@
+{
+    "array": {
+        "key1": [
+            1,
+            2,
+            3
+        ],
+        "key2": [
+            "red",
+            "yellow",
+            "green"
+        ],
+        "key3": [
+            [
+                1,
+                2
+            ],
+            [
+                3,
+                4,
+                5
+            ]
+        ],
+        "key4": [
+            [
+                1,
+                2
+            ],
+            [
+                "a",
+                "b",
+                "c"
+            ]
+        ],
+        "key5": [
+            1,
+            2,
+            3
+        ],
+        "key6": [
+            1,
+            2
+        ]
+    },
+    "boolean": {
+        "False": false,
+        "True": true
+    },
+    "datetime": {
+        "key1": "1979-05-27T07:32:00Z",
+        "key2": "1979-05-27T00:32:00-07:00",
+        "key3": "1979-05-27T00:32:00.999999-07:00"
+    },
+    "float": {
+        "both": {
+            "key": 6.626e-34
+        },
+        "exponent": {
+            "key1": 5e+22,
+            "key2": 1000000,
+            "key3": -0.02
+        },
+        "fractional": {
+            "key1": 1,
+            "key2": 3.1415,
+            "key3": -0.01
+        },
+        "underscores": {
+            "key1": 9224617.445991227,
+            "key2": 1e+100
+        }
+    },
+    "fruit": [{
+            "name": "apple",
+            "physical": {
+                "color": "red",
+                "shape": "round"
+            },
+            "variety": [{
+                    "name": "red delicious"
+                },
+                {
+                    "name": "granny smith"
+                }
+            ]
+        },
+        {
+            "name": "banana",
+            "variety": [{
+                "name": "plantain"
+            }]
+        }
+    ],
+    "integer": {
+        "key1": 99,
+        "key2": 42,
+        "key3": 0,
+        "key4": -17,
+        "underscores": {
+            "key1": 1000,
+            "key2": 5349221,
+            "key3": 12345
+        }
+    },
+    "products": [{
+            "name": "Hammer",
+            "sku": 738594937
+        },
+        {},
+        {
+            "color": "gray",
+            "name": "Nail",
+            "sku": 284758393
+        }
+    ],
+    "string": {
+        "basic": {
+            "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+        },
+        "literal": {
+            "multiline": {
+                "lines": "The first newline is\ntrimmed in raw strings.\n   All other whitespace\n   is preserved.\n",
+                "regex2": "I [dw]on't need \\d{2} apples"
+            },
+            "quoted": "Tom \"Dubs\" Preston-Werner",
+            "regex": "\u003c\\i\\c*\\s*\u003e",
+            "winpath": "C:\\Users\\nodejs\\templates",
+            "winpath2": "\\\\ServerX\\admin$\\system32\\"
+        },
+        "multiline": {
+            "continued": {
+                "key1": "The quick brown fox jumps over the lazy dog.",
+                "key2": "The quick brown fox jumps over the lazy dog.",
+                "key3": "The quick brown fox jumps over the lazy dog."
+            },
+            "key1": "One\nTwo",
+            "key2": "One\nTwo",
+            "key3": "One\nTwo"
+        }
+    },
+    "table": {
+        "inline": {
+            "name": {
+                "first": "Tom",
+                "last": "Preston-Werner"
+            },
+            "point": {
+                "x": 1,
+                "y": 2
+            }
+        },
+        "key": "value",
+        "subtable": {
+            "key": "another value"
+        }
+    },
+    "x": {
+        "y": {
+            "z": {
+                "w": {}
+            }
+        }
+    }
+}
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8b8bb528e75d0547a2aa7c75c535974a59933c3a
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -e
+
+reference_ref=${1:-master}
+reference_git=${2:-.}
+
+if ! hash benchstat 2>/dev/null; then
+    echo "Installing benchstat"
+    go get golang.org/x/perf/cmd/benchstat
+    go install golang.org/x/perf/cmd/benchstat
+fi
+
+tempdir=$(mktemp -d /tmp/go-toml-benchmark-XXXXXX)
+ref_tempdir="${tempdir}/ref"
+ref_benchmark="${ref_tempdir}/benchmark-$(echo -n "${reference_ref}" | tr -s '/' '-').txt"
+local_benchmark="$(pwd)/benchmark-local.txt"
+
+echo "=== ${reference_ref} (${ref_tempdir})"
+git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
+pushd ${ref_tempdir} >/dev/null
+git checkout ${reference_ref} >/dev/null 2>/dev/null
+go test -bench=. -benchmem | tee ${ref_benchmark}
+popd >/dev/null
+
+echo ""
+echo "=== local"
+go test -bench=. -benchmem  | tee ${local_benchmark}
+
+echo ""
+echo "=== diff"
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml
new file mode 100644
index 0000000000000000000000000000000000000000..dfd77e09622b3015d7d718cf80674883d9d9ade0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.toml
@@ -0,0 +1,244 @@
+################################################################################
+## Comment
+
+# Speak your mind with the hash symbol. They go from the symbol to the end of
+# the line.
+
+
+################################################################################
+## Table
+
+# Tables (also known as hash tables or dictionaries) are collections of
+# key/value pairs. They appear in square brackets on a line by themselves.
+
+[table]
+
+key = "value" # Yeah, you can do this.
+
+# Nested tables are denoted by table names with dots in them. Name your tables
+# whatever crap you please, just don't use #, ., [ or ].
+
+[table.subtable]
+
+key = "another value"
+
+# You don't need to specify all the super-tables if you don't want to. TOML
+# knows how to do it for you.
+
+# [x] you
+# [x.y] don't
+# [x.y.z] need these
+[x.y.z.w] # for this to work
+
+
+################################################################################
+## Inline Table
+
+# Inline tables provide a more compact syntax for expressing tables. They are
+# especially useful for grouped data that can otherwise quickly become verbose.
+# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
+# allowed between the curly braces unless they are valid within a value.
+
+[table.inline]
+
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+
+
+################################################################################
+## String
+
+# There are four ways to express strings: basic, multi-line basic, literal, and
+# multi-line literal. All strings must contain only valid UTF-8 characters.
+
+[string.basic]
+
+basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
+
+[string.multiline]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "One\nTwo"
+key2 = """One\nTwo"""
+key3 = """
+One
+Two"""
+
+[string.multiline.continued]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "The quick brown fox jumps over the lazy dog."
+
+key2 = """
+The quick brown \
+
+
+  fox jumps over \
+    the lazy dog."""
+
+key3 = """\
+       The quick brown \
+       fox jumps over \
+       the lazy dog.\
+       """
+
+[string.literal]
+
+# What you see is what you get.
+winpath  = 'C:\Users\nodejs\templates'
+winpath2 = '\\ServerX\admin$\system32\'
+quoted   = 'Tom "Dubs" Preston-Werner'
+regex    = '<\i\c*\s*>'
+
+
+[string.literal.multiline]
+
+regex2 = '''I [dw]on't need \d{2} apples'''
+lines  = '''
+The first newline is
+trimmed in raw strings.
+   All other whitespace
+   is preserved.
+'''
+
+
+################################################################################
+## Integer
+
+# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
+# Negative numbers are prefixed with a minus sign.
+
+[integer]
+
+key1 = +99
+key2 = 42
+key3 = 0
+key4 = -17
+
+[integer.underscores]
+
+# For large numbers, you may use underscores to enhance readability. Each
+# underscore must be surrounded by at least one digit.
+key1 = 1_000
+key2 = 5_349_221
+key3 = 1_2_3_4_5     # valid but inadvisable
+
+
+################################################################################
+## Float
+
+# A float consists of an integer part (which may be prefixed with a plus or
+# minus sign) followed by a fractional part and/or an exponent part.
+
+[float.fractional]
+
+key1 = +1.0
+key2 = 3.1415
+key3 = -0.01
+
+[float.exponent]
+
+key1 = 5e+22
+key2 = 1e6
+key3 = -2E-2
+
+[float.both]
+
+key = 6.626e-34
+
+[float.underscores]
+
+key1 = 9_224_617.445_991_228_313
+key2 = 1e1_00
+
+
+################################################################################
+## Boolean
+
+# Booleans are just the tokens you're used to. Always lowercase.
+
+[boolean]
+
+True = true
+False = false
+
+
+################################################################################
+## Datetime
+
+# Datetimes are RFC 3339 dates.
+
+[datetime]
+
+key1 = 1979-05-27T07:32:00Z
+key2 = 1979-05-27T00:32:00-07:00
+key3 = 1979-05-27T00:32:00.999999-07:00
+
+
+################################################################################
+## Array
+
+# Arrays are square brackets with other primitives inside. Whitespace is
+# ignored. Elements are separated by commas. Data types may not be mixed.
+
+[array]
+
+key1 = [ 1, 2, 3 ]
+key2 = [ "red", "yellow", "green" ]
+key3 = [ [ 1, 2 ], [3, 4, 5] ]
+#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
+
+# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
+# also ignore newlines between the brackets.  Terminating commas are ok before
+# the closing bracket.
+
+key5 = [
+  1, 2, 3
+]
+key6 = [
+  1,
+  2, # this is ok
+]
+
+
+################################################################################
+## Array of Tables
+
+# These can be expressed by using a table name in double brackets. Each table
+# with the same double bracketed name will be an element in the array. The
+# tables are inserted in the order encountered.
+
+[[products]]
+
+name = "Hammer"
+sku = 738594937
+
+[[products]]
+
+[[products]]
+
+name = "Nail"
+sku = 284758393
+color = "gray"
+
+
+# You can create nested arrays of tables as well.
+
+[[fruit]]
+  name = "apple"
+
+  [fruit.physical]
+    color = "red"
+    shape = "round"
+
+  [[fruit.variety]]
+    name = "red delicious"
+
+  [[fruit.variety]]
+    name = "granny smith"
+
+[[fruit]]
+  name = "banana"
+
+  [[fruit.variety]]
+    name = "plantain"
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0bd19f08a6980f0c99f7ea7a544fe9a7c7993a49
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.yml
@@ -0,0 +1,121 @@
+---
+array:
+  key1:
+  - 1
+  - 2
+  - 3
+  key2:
+  - red
+  - yellow
+  - green
+  key3:
+  - - 1
+    - 2
+  - - 3
+    - 4
+    - 5
+  key4:
+  - - 1
+    - 2
+  - - a
+    - b
+    - c
+  key5:
+  - 1
+  - 2
+  - 3
+  key6:
+  - 1
+  - 2
+boolean:
+  'False': false
+  'True': true
+datetime:
+  key1: '1979-05-27T07:32:00Z'
+  key2: '1979-05-27T00:32:00-07:00'
+  key3: '1979-05-27T00:32:00.999999-07:00'
+float:
+  both:
+    key: 6.626e-34
+  exponent:
+    key1: 5.0e+22
+    key2: 1000000
+    key3: -0.02
+  fractional:
+    key1: 1
+    key2: 3.1415
+    key3: -0.01
+  underscores:
+    key1: 9224617.445991227
+    key2: 1.0e+100
+fruit:
+- name: apple
+  physical:
+    color: red
+    shape: round
+  variety:
+  - name: red delicious
+  - name: granny smith
+- name: banana
+  variety:
+  - name: plantain
+integer:
+  key1: 99
+  key2: 42
+  key3: 0
+  key4: -17
+  underscores:
+    key1: 1000
+    key2: 5349221
+    key3: 12345
+products:
+- name: Hammer
+  sku: 738594937
+- {}
+- color: gray
+  name: Nail
+  sku: 284758393
+string:
+  basic:
+    basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+  literal:
+    multiline:
+      lines: |
+        The first newline is
+        trimmed in raw strings.
+           All other whitespace
+           is preserved.
+      regex2: I [dw]on't need \d{2} apples
+    quoted: Tom "Dubs" Preston-Werner
+    regex: "<\\i\\c*\\s*>"
+    winpath: C:\Users\nodejs\templates
+    winpath2: "\\\\ServerX\\admin$\\system32\\"
+  multiline:
+    continued:
+      key1: The quick brown fox jumps over the lazy dog.
+      key2: The quick brown fox jumps over the lazy dog.
+      key3: The quick brown fox jumps over the lazy dog.
+    key1: |-
+      One
+      Two
+    key2: |-
+      One
+      Two
+    key3: |-
+      One
+      Two
+table:
+  inline:
+    name:
+      first: Tom
+      last: Preston-Werner
+    point:
+      x: 1
+      y: 2
+  key: value
+  subtable:
+    key: another value
+x:
+  y:
+    z:
+      w: {}
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5fd98c0211a25f2164c3b63a103024b9ba2b915
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -0,0 +1,23 @@
+// Package toml is a TOML parser and manipulation library.
+//
+// This version supports the specification as described in
+// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
+//
+// Marshaling
+//
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
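+//
+// For example, a minimal sketch (the Config type and the key name are
+// illustrative, not part of the library):
+//
+//   type Config struct {
+//       Answer int `toml:"answer"`
+//   }
+//
+//   var cfg Config
+//   err := toml.Unmarshal([]byte("answer = 42"), &cfg)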
+//
+// TOML document as a tree
+//
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
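+//
+// For instance, a sketch (the key name is illustrative):
+//
+//   tree, err := toml.Load("answer = 42")
+//   if err == nil {
+//       answer := tree.Get("answer") // interface{} holding int64(42)
+//   }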
+//
+// JSONPath-like queries
+//
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
+//
+package toml
diff --git a/vendor/github.com/pelletier/go-toml/example-crlf.toml b/vendor/github.com/pelletier/go-toml/example-crlf.toml
new file mode 100644
index 0000000000000000000000000000000000000000..12950a163d33e15fe5901393b9f10df1c1ee74a1
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example-crlf.toml
@@ -0,0 +1,29 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
diff --git a/vendor/github.com/pelletier/go-toml/example.toml b/vendor/github.com/pelletier/go-toml/example.toml
new file mode 100644
index 0000000000000000000000000000000000000000..3d902f28207e12f34bb16065c23a8253098eca45
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/example.toml
@@ -0,0 +1,29 @@
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go
new file mode 100644
index 0000000000000000000000000000000000000000..14570c8d35773290e558baf8098cbb10e78f5182
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.go
@@ -0,0 +1,31 @@
+// +build gofuzz
+
+package toml
+
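+// Fuzz is the entry point for go-fuzz. Returning 1 tells go-fuzz the input
+// was interesting (it parsed and round-tripped through ToTomlString);
+// returning 0 means the input was rejected by the parser.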
+func Fuzz(data []byte) int {
+	tree, err := LoadBytes(data)
+	if err != nil {
+		if tree != nil {
+			panic("tree must be nil if there is an error")
+		}
+		return 0
+	}
+
+	str, err := tree.ToTomlString()
+	if err != nil {
+		if str != "" {
+			panic(`str must be "" if there is an error`)
+		}
+		panic(err)
+	}
+
+	tree, err = Load(str)
+	if err != nil {
+		if tree != nil {
+			panic("tree must be nil if there is an error")
+		}
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3204b4c4463a93a191e8553df2722e2fec92628b
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/fuzz.sh
@@ -0,0 +1,15 @@
+#! /bin/sh
+set -eu
+
+go get github.com/dvyukov/go-fuzz/go-fuzz
+go get github.com/dvyukov/go-fuzz/go-fuzz-build
+
+if [ ! -e toml-fuzz.zip ]; then
+    go-fuzz-build github.com/pelletier/go-toml
+fi
+
+rm -fr fuzz
+mkdir -p fuzz/corpus
+cp *.toml fuzz/corpus
+
+go-fuzz -bin=toml-fuzz.zip -workdir=fuzz
diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go
new file mode 100644
index 0000000000000000000000000000000000000000..9707c688405bb6c6bde2b8c7532a1d16fb00adfb
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/keysparsing.go
@@ -0,0 +1,161 @@
+// Parsing of keys, handling both bare and quoted keys.
+
+package toml
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"unicode"
+)
+
+var escapeSequenceMap = map[rune]rune{
+	'b':  '\b',
+	't':  '\t',
+	'n':  '\n',
+	'f':  '\f',
+	'r':  '\r',
+	'"':  '"',
+	'\\': '\\',
+}
+
+type parseKeyState int
+
+const (
+	BARE parseKeyState = iota
+	BASIC
+	LITERAL
+	ESC
+	UNICODE_4
+	UNICODE_8
+)
+
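+// parseKey splits a dotted key into its path segments. Bare segments are
+// split on dots, while quoted segments (basic "..." or literal '...') may
+// themselves contain dots, e.g. a."b.c" yields ["a", "b.c"].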
+func parseKey(key string) ([]string, error) {
+	groups := []string{}
+	var buffer bytes.Buffer
+	var hex bytes.Buffer
+	state := BARE
+	wasInQuotes := false
+	ignoreSpace := true
+	expectDot := false
+
+	for _, char := range key {
+		if ignoreSpace {
+			if char == ' ' {
+				continue
+			}
+			ignoreSpace = false
+		}
+
+		if state == ESC {
+			if char == 'u' {
+				state = UNICODE_4
+				hex.Reset()
+			} else if char == 'U' {
+				state = UNICODE_8
+				hex.Reset()
+			} else if newChar, ok := escapeSequenceMap[char]; ok {
+				buffer.WriteRune(newChar)
+				state = BASIC
+			} else {
+				return nil, fmt.Errorf(`invalid escape sequence \%c`, char)
+			}
+			continue
+		}
+
+		if state == UNICODE_4 || state == UNICODE_8 {
+			if isHexDigit(char) {
+				hex.WriteRune(char)
+			} else {
+				return nil, fmt.Errorf(`invalid unicode escape: expected hex digit, got %c`, char)
+			}
+			if (state == UNICODE_4 && hex.Len() == 4) || (state == UNICODE_8 && hex.Len() == 8) {
+				if value, err := strconv.ParseInt(hex.String(), 16, 32); err == nil {
+					buffer.WriteRune(rune(value))
+				} else {
+					return nil, err
+				}
+				state = BASIC
+			}
+			continue
+		}
+
+		switch char {
+		case '\\':
+			if state == BASIC {
+				state = ESC
+			} else if state == LITERAL {
+				buffer.WriteRune(char)
+			}
+		case '\'':
+			if state == BARE {
+				state = LITERAL
+			} else if state == LITERAL {
+				groups = append(groups, buffer.String())
+				buffer.Reset()
+				wasInQuotes = true
+				state = BARE
+			}
+			expectDot = false
+		case '"':
+			if state == BARE {
+				state = BASIC
+			} else if state == BASIC {
+				groups = append(groups, buffer.String())
+				buffer.Reset()
+				state = BARE
+				wasInQuotes = true
+			}
+			expectDot = false
+		case '.':
+			if state != BARE {
+				buffer.WriteRune(char)
+			} else {
+				if !wasInQuotes {
+					if buffer.Len() == 0 {
+						return nil, errors.New("empty table key")
+					}
+					groups = append(groups, buffer.String())
+					buffer.Reset()
+				}
+				ignoreSpace = true
+				expectDot = false
+				wasInQuotes = false
+			}
+		case ' ':
+			if state == BASIC || state == LITERAL {
+				buffer.WriteRune(char)
+			} else {
+				expectDot = true
+			}
+		default:
+			if state == BARE {
+				if !isValidBareChar(char) {
+					return nil, fmt.Errorf("invalid bare character: %c", char)
+				} else if expectDot {
+					return nil, errors.New("keys cannot contain spaces")
+				}
+			}
+			buffer.WriteRune(char)
+			expectDot = false
+		}
+	}
+
+	// state must be BARE at the end
+	if state == ESC {
+		return nil, errors.New("unfinished escape sequence")
+	} else if state != BARE {
+		return nil, errors.New("mismatched quotes")
+	}
+
+	if buffer.Len() > 0 {
+		groups = append(groups, buffer.String())
+	}
+	if len(groups) == 0 {
+		return nil, errors.New("empty key")
+	}
+	return groups, nil
+}
+
+func isValidBareChar(r rune) bool {
+	return isAlphanumeric(r) || r == '-' || unicode.IsNumber(r)
+}
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b6647d66fa15b0e7710a610a03ba2eadbe87130
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -0,0 +1,651 @@
+// TOML lexer.
+//
+// Written using the principles developed by Rob Pike in
+// http://www.youtube.com/watch?v=HxaD_trXwRE
+
+package toml
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var dateRegexp *regexp.Regexp
+
+// A tomlLexStateFn lexes one construct and returns the next state function, or nil when lexing is complete.
+type tomlLexStateFn func() tomlLexStateFn
+
+// tomlLexer holds the scanning state of the lexer.
+type tomlLexer struct {
+	inputIdx          int
+	input             []rune // Textual source
+	currentTokenStart int
+	currentTokenStop  int
+	tokens            []token
+	depth             int
+	line              int
+	col               int
+	endbufferLine     int
+	endbufferCol      int
+}
+
+// Basic read operations on input
+
+func (l *tomlLexer) read() rune {
+	r := l.peek()
+	if r == '\n' {
+		l.endbufferLine++
+		l.endbufferCol = 1
+	} else {
+		l.endbufferCol++
+	}
+	l.inputIdx++
+	return r
+}
+
+func (l *tomlLexer) next() rune {
+	r := l.read()
+
+	if r != eof {
+		l.currentTokenStop++
+	}
+	return r
+}
+
+func (l *tomlLexer) ignore() {
+	l.currentTokenStart = l.currentTokenStop
+	l.line = l.endbufferLine
+	l.col = l.endbufferCol
+}
+
+func (l *tomlLexer) skip() {
+	l.next()
+	l.ignore()
+}
+
+func (l *tomlLexer) fastForward(n int) {
+	for i := 0; i < n; i++ {
+		l.next()
+	}
+}
+
+func (l *tomlLexer) emitWithValue(t tokenType, value string) {
+	l.tokens = append(l.tokens, token{
+		Position: Position{l.line, l.col},
+		typ:      t,
+		val:      value,
+	})
+	l.ignore()
+}
+
+func (l *tomlLexer) emit(t tokenType) {
+	l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
+}
+
+func (l *tomlLexer) peek() rune {
+	if l.inputIdx >= len(l.input) {
+		return eof
+	}
+	return l.input[l.inputIdx]
+}
+
+func (l *tomlLexer) peekString(size int) string {
+	maxIdx := len(l.input)
+	upperIdx := l.inputIdx + size // FIXME: potential overflow
+	if upperIdx > maxIdx {
+		upperIdx = maxIdx
+	}
+	return string(l.input[l.inputIdx:upperIdx])
+}
+
+func (l *tomlLexer) follow(next string) bool {
+	return next == l.peekString(len(next))
+}
+
+// Error management
+
+func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
+	l.tokens = append(l.tokens, token{
+		Position: Position{l.line, l.col},
+		typ:      tokenError,
+		val:      fmt.Sprintf(format, args...),
+	})
+	return nil
+}
+
+// State functions
+
+func (l *tomlLexer) lexVoid() tomlLexStateFn {
+	for {
+		next := l.peek()
+		switch next {
+		case '[':
+			return l.lexTableKey
+		case '#':
+			return l.lexComment(l.lexVoid)
+		case '=':
+			return l.lexEqual
+		case '\r':
+			fallthrough
+		case '\n':
+			l.skip()
+			continue
+		}
+
+		if isSpace(next) {
+			l.skip()
+		}
+
+		if l.depth > 0 {
+			return l.lexRvalue
+		}
+
+		if isKeyStartChar(next) {
+			return l.lexKey
+		}
+
+		if next == eof {
+			l.next()
+			break
+		}
+	}
+
+	l.emit(tokenEOF)
+	return nil
+}
+
+func (l *tomlLexer) lexRvalue() tomlLexStateFn {
+	for {
+		next := l.peek()
+		switch next {
+		case '.':
+			return l.errorf("cannot start float with a dot")
+		case '=':
+			return l.lexEqual
+		case '[':
+			l.depth++
+			return l.lexLeftBracket
+		case ']':
+			l.depth--
+			return l.lexRightBracket
+		case '{':
+			return l.lexLeftCurlyBrace
+		case '}':
+			return l.lexRightCurlyBrace
+		case '#':
+			return l.lexComment(l.lexRvalue)
+		case '"':
+			return l.lexString
+		case '\'':
+			return l.lexLiteralString
+		case ',':
+			return l.lexComma
+		case '\r':
+			fallthrough
+		case '\n':
+			l.skip()
+			if l.depth == 0 {
+				return l.lexVoid
+			}
+			return l.lexRvalue
+		case '_':
+			return l.errorf("cannot start number with underscore")
+		}
+
+		if l.follow("true") {
+			return l.lexTrue
+		}
+
+		if l.follow("false") {
+			return l.lexFalse
+		}
+
+		if isSpace(next) {
+			l.skip()
+			continue
+		}
+
+		if next == eof {
+			l.next()
+			break
+		}
+
+		possibleDate := l.peekString(35)
+		dateMatch := dateRegexp.FindString(possibleDate)
+		if dateMatch != "" {
+			l.fastForward(len(dateMatch))
+			return l.lexDate
+		}
+
+		if next == '+' || next == '-' || isDigit(next) {
+			return l.lexNumber
+		}
+
+		if isAlphanumeric(next) {
+			return l.lexKey
+		}
+
+		return l.errorf("no value can start with %c", next)
+	}
+
+	l.emit(tokenEOF)
+	return nil
+}
+
+func (l *tomlLexer) lexLeftCurlyBrace() tomlLexStateFn {
+	l.next()
+	l.emit(tokenLeftCurlyBrace)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexRightCurlyBrace() tomlLexStateFn {
+	l.next()
+	l.emit(tokenRightCurlyBrace)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexDate() tomlLexStateFn {
+	l.emit(tokenDate)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTrue() tomlLexStateFn {
+	l.fastForward(4)
+	l.emit(tokenTrue)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexFalse() tomlLexStateFn {
+	l.fastForward(5)
+	l.emit(tokenFalse)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexEqual() tomlLexStateFn {
+	l.next()
+	l.emit(tokenEqual)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexComma() tomlLexStateFn {
+	l.next()
+	l.emit(tokenComma)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexKey() tomlLexStateFn {
+	growingString := ""
+
+	for r := l.peek(); isKeyChar(r) || r == '\n' || r == '\r'; r = l.peek() {
+		if r == '"' {
+			l.next()
+			str, err := l.lexStringAsString(`"`, false, true)
+			if err != nil {
+				return l.errorf(err.Error())
+			}
+			growingString += `"` + str + `"`
+			l.next()
+			continue
+		} else if r == '\n' {
+			return l.errorf("keys cannot contain new lines")
+		} else if isSpace(r) {
+			break
+		} else if !isValidBareChar(r) {
+			return l.errorf("keys cannot contain %c character", r)
+		}
+		growingString += string(r)
+		l.next()
+	}
+	l.emitWithValue(tokenKey, growingString)
+	return l.lexVoid
+}
+
+func (l *tomlLexer) lexComment(previousState tomlLexStateFn) tomlLexStateFn {
+	return func() tomlLexStateFn {
+		for next := l.peek(); next != '\n' && next != eof; next = l.peek() {
+			if next == '\r' && l.follow("\r\n") {
+				break
+			}
+			l.next()
+		}
+		l.ignore()
+		return previousState
+	}
+}
+
+func (l *tomlLexer) lexLeftBracket() tomlLexStateFn {
+	l.next()
+	l.emit(tokenLeftBracket)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexLiteralStringAsString(terminator string, discardLeadingNewLine bool) (string, error) {
+	growingString := ""
+
+	if discardLeadingNewLine {
+		if l.follow("\r\n") {
+			l.skip()
+			l.skip()
+		} else if l.peek() == '\n' {
+			l.skip()
+		}
+	}
+
+	// find end of string
+	for {
+		if l.follow(terminator) {
+			return growingString, nil
+		}
+
+		next := l.peek()
+		if next == eof {
+			break
+		}
+		growingString += string(l.next())
+	}
+
+	return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexLiteralString() tomlLexStateFn {
+	l.skip()
+
+	// handle special case for triple-quote
+	terminator := "'"
+	discardLeadingNewLine := false
+	if l.follow("''") {
+		l.skip()
+		l.skip()
+		terminator = "'''"
+		discardLeadingNewLine = true
+	}
+
+	str, err := l.lexLiteralStringAsString(terminator, discardLeadingNewLine)
+	if err != nil {
+		return l.errorf(err.Error())
+	}
+
+	l.emitWithValue(tokenString, str)
+	l.fastForward(len(terminator))
+	l.ignore()
+	return l.lexRvalue
+}
+
+// Lex a string and return the results as a string.
+// Terminator is the substring indicating the end of the token.
+// The resulting string does not include the terminator.
+func (l *tomlLexer) lexStringAsString(terminator string, discardLeadingNewLine, acceptNewLines bool) (string, error) {
+	growingString := ""
+
+	if discardLeadingNewLine {
+		if l.follow("\r\n") {
+			l.skip()
+			l.skip()
+		} else if l.peek() == '\n' {
+			l.skip()
+		}
+	}
+
+	for {
+		if l.follow(terminator) {
+			return growingString, nil
+		}
+
+		if l.follow("\\") {
+			l.next()
+			switch l.peek() {
+			case '\r':
+				fallthrough
+			case '\n':
+				fallthrough
+			case '\t':
+				fallthrough
+			case ' ':
+				// skip all whitespace chars following backslash
+				for strings.ContainsRune("\r\n\t ", l.peek()) {
+					l.next()
+				}
+			case '"':
+				growingString += "\""
+				l.next()
+			case 'n':
+				growingString += "\n"
+				l.next()
+			case 'b':
+				growingString += "\b"
+				l.next()
+			case 'f':
+				growingString += "\f"
+				l.next()
+			case '/':
+				growingString += "/"
+				l.next()
+			case 't':
+				growingString += "\t"
+				l.next()
+			case 'r':
+				growingString += "\r"
+				l.next()
+			case '\\':
+				growingString += "\\"
+				l.next()
+			case 'u':
+				l.next()
+				code := ""
+				for i := 0; i < 4; i++ {
+					c := l.peek()
+					if !isHexDigit(c) {
+						return "", errors.New("unfinished unicode escape")
+					}
+					l.next()
+					code = code + string(c)
+				}
+				intcode, err := strconv.ParseInt(code, 16, 32)
+				if err != nil {
+					return "", errors.New("invalid unicode escape: \\u" + code)
+				}
+				growingString += string(rune(intcode))
+			case 'U':
+				l.next()
+				code := ""
+				for i := 0; i < 8; i++ {
+					c := l.peek()
+					if !isHexDigit(c) {
+						return "", errors.New("unfinished unicode escape")
+					}
+					l.next()
+					code = code + string(c)
+				}
+				intcode, err := strconv.ParseInt(code, 16, 64)
+				if err != nil {
+					return "", errors.New("invalid unicode escape: \\U" + code)
+				}
+				growingString += string(rune(intcode))
+			default:
+				return "", errors.New("invalid escape sequence: \\" + string(l.peek()))
+			}
+		} else {
+			r := l.peek()
+
+			if 0x00 <= r && r <= 0x1F && !(acceptNewLines && (r == '\n' || r == '\r')) {
+				return "", fmt.Errorf("unescaped control character %U", r)
+			}
+			l.next()
+			growingString += string(r)
+		}
+
+		if l.peek() == eof {
+			break
+		}
+	}
+
+	return "", errors.New("unclosed string")
+}
+
+func (l *tomlLexer) lexString() tomlLexStateFn {
+	l.skip()
+
+	// handle special case for triple-quote
+	terminator := `"`
+	discardLeadingNewLine := false
+	acceptNewLines := false
+	if l.follow(`""`) {
+		l.skip()
+		l.skip()
+		terminator = `"""`
+		discardLeadingNewLine = true
+		acceptNewLines = true
+	}
+
+	str, err := l.lexStringAsString(terminator, discardLeadingNewLine, acceptNewLines)
+
+	if err != nil {
+		return l.errorf(err.Error())
+	}
+
+	l.emitWithValue(tokenString, str)
+	l.fastForward(len(terminator))
+	l.ignore()
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexTableKey() tomlLexStateFn {
+	l.next()
+
+	if l.peek() == '[' {
+		// token '[[' signifies an array of tables
+		l.next()
+		l.emit(tokenDoubleLeftBracket)
+		return l.lexInsideTableArrayKey
+	}
+	// vanilla table key
+	l.emit(tokenLeftBracket)
+	return l.lexInsideTableKey
+}
+
+func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if l.currentTokenStop > l.currentTokenStart {
+				l.emit(tokenKeyGroupArray)
+			}
+			l.next()
+			if l.peek() != ']' {
+				break
+			}
+			l.next()
+			l.emit(tokenDoubleRightBracket)
+			return l.lexVoid
+		case '[':
+			return l.errorf("table array key cannot contain '['")
+		default:
+			l.next()
+		}
+	}
+	return l.errorf("unclosed table array key")
+}
+
+func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
+	for r := l.peek(); r != eof; r = l.peek() {
+		switch r {
+		case ']':
+			if l.currentTokenStop > l.currentTokenStart {
+				l.emit(tokenKeyGroup)
+			}
+			l.next()
+			l.emit(tokenRightBracket)
+			return l.lexVoid
+		case '[':
+			return l.errorf("table key cannot contain '['")
+		default:
+			l.next()
+		}
+	}
+	return l.errorf("unclosed table key")
+}
+
+func (l *tomlLexer) lexRightBracket() tomlLexStateFn {
+	l.next()
+	l.emit(tokenRightBracket)
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) lexNumber() tomlLexStateFn {
+	r := l.peek()
+	if r == '+' || r == '-' {
+		l.next()
+	}
+	pointSeen := false
+	expSeen := false
+	digitSeen := false
+	for {
+		next := l.peek()
+		if next == '.' {
+			if pointSeen {
+				return l.errorf("cannot have two dots in one float")
+			}
+			l.next()
+			if !isDigit(l.peek()) {
+				return l.errorf("float cannot end with a dot")
+			}
+			pointSeen = true
+		} else if next == 'e' || next == 'E' {
+			expSeen = true
+			l.next()
+			r := l.peek()
+			if r == '+' || r == '-' {
+				l.next()
+			}
+		} else if isDigit(next) {
+			digitSeen = true
+			l.next()
+		} else if next == '_' {
+			l.next()
+		} else {
+			break
+		}
+		if pointSeen && !digitSeen {
+			return l.errorf("cannot start float with a dot")
+		}
+	}
+
+	if !digitSeen {
+		return l.errorf("no digit in that number")
+	}
+	if pointSeen || expSeen {
+		l.emit(tokenFloat)
+	} else {
+		l.emit(tokenInteger)
+	}
+	return l.lexRvalue
+}
+
+func (l *tomlLexer) run() {
+	for state := l.lexVoid; state != nil; {
+		state = state()
+	}
+}
+
+func init() {
+	dateRegexp = regexp.MustCompile(`^\d{1,4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{1,9})?(Z|[+-]\d{2}:\d{2})`)
+}
+
+// lexToml is the entry point: it lexes the given bytes into a stream of tokens.
+func lexToml(inputBytes []byte) []token {
+	runes := bytes.Runes(inputBytes)
+	l := &tomlLexer{
+		input:         runes,
+		tokens:        make([]token, 0, 256),
+		line:          1,
+		col:           1,
+		endbufferLine: 1,
+		endbufferCol:  1,
+	}
+	l.run()
+	return l.tokens
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 0000000000000000000000000000000000000000..1bbdfa1d895baf96b168d8ce1457b196a44ac1c1
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,621 @@
+package toml
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type tomlOpts struct {
+	name      string
+	comment   string
+	commented bool
+	include   bool
+	omitempty bool
+}
+
+type encOpts struct {
+	quoteMapKeys bool
+}
+
+var encOptsDefaults = encOpts{
+	quoteMapKeys: false,
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+
+// Check if the given marshal type maps to a Tree primitive
+func isPrimitive(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Ptr:
+		return isPrimitive(mtype.Elem())
+	case reflect.Bool:
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Struct:
+		return mtype == timeType || isCustomMarshaler(mtype)
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a Tree slice
+func isTreeSlice(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Slice:
+		return !isOtherSlice(mtype)
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a non-Tree slice
+func isOtherSlice(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Ptr:
+		return isOtherSlice(mtype.Elem())
+	case reflect.Slice:
+		return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
+	default:
+		return false
+	}
+}
+
+// Check if the given marshal type maps to a Tree
+func isTree(mtype reflect.Type) bool {
+	switch mtype.Kind() {
+	case reflect.Map:
+		return true
+	case reflect.Struct:
+		return !isPrimitive(mtype)
+	default:
+		return false
+	}
+}
+
+func isCustomMarshaler(mtype reflect.Type) bool {
+	return mtype.Implements(marshalerType)
+}
+
+func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
+	return mval.Interface().(Marshaler).MarshalTOML()
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+	MarshalTOML() ([]byte, error)
+}
+
+/*
+Marshal returns the TOML encoding of v.  Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+The following struct annotations are supported:
+
+  toml:"Field"      Overrides the field's name to output.
+  omitempty         When set, empty values and groups are not emitted.
+  comment:"comment" Emits a # comment on the same line. This supports new lines.
+  commented:"true"  Emits the value as commented.
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+
+Tree structural types and corresponding marshal types:
+
+  *Tree                            (*)struct, (*)map[string]interface{}
+  []*Tree                          (*)[](*)struct, (*)[](*)map[string]interface{}
+  []interface{} (as interface{})   (*)[]primitive, (*)[]([]interface{})
+  interface{}                      (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+  uint64     uint, uint8-uint64, pointers to same
+  int64      int, int8-int64, pointers to same
+  float64    float32, float64, pointers to same
+  string     string, pointers to same
+  bool       bool, pointers to same
+  time.Time  time.Time{}, pointers to same
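+
+A short illustrative sketch of the annotations (the Postgres type is an
+assumption, not part of this package):
+
+  type Postgres struct {
+      User     string `toml:"user"`
+      Password string `toml:"password,omitempty" comment:"db password"`
+  }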
+*/
+func Marshal(v interface{}) ([]byte, error) {
+	return NewEncoder(nil).marshal(v)
+}
+
+// Encoder writes TOML values to an output stream.
+type Encoder struct {
+	w io.Writer
+	encOpts
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:       w,
+		encOpts: encOptsDefaults,
+	}
+}
+
+// Encode writes the TOML encoding of v to the stream.
+//
+// See the documentation for Marshal for details.
+func (e *Encoder) Encode(v interface{}) error {
+	b, err := e.marshal(v)
+	if err != nil {
+		return err
+	}
+	if _, err := e.w.Write(b); err != nil {
+		return err
+	}
+	return nil
+}
+
+// QuoteMapKeys instructs the encoder to quote the TOML keys
+// of maps that have string-typed keys.
+//
+// This relieves the character limitations on map keys.
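+//
+// For example (a sketch), NewEncoder(w).QuoteMapKeys(true).Encode(v)
+// writes a map key such as "key with spaces" as a quoted TOML key
+// instead of an invalid bare key.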
+func (e *Encoder) QuoteMapKeys(v bool) *Encoder {
+	e.quoteMapKeys = v
+	return e
+}
+
+func (e *Encoder) marshal(v interface{}) ([]byte, error) {
+	mtype := reflect.TypeOf(v)
+	if mtype.Kind() != reflect.Struct {
+		return []byte{}, errors.New("Only a struct can be marshaled to TOML")
+	}
+	sval := reflect.ValueOf(v)
+	if isCustomMarshaler(mtype) {
+		return callCustomMarshaler(sval)
+	}
+	t, err := e.valueToTree(mtype, sval)
+	if err != nil {
+		return []byte{}, err
+	}
+	s, err := t.ToTomlString()
+	return []byte(s), err
+}
+
+// Convert given marshal struct or map value to toml tree
+func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return e.valueToTree(mtype.Elem(), mval.Elem())
+	}
+	tval := newTree()
+	switch mtype.Kind() {
+	case reflect.Struct:
+		for i := 0; i < mtype.NumField(); i++ {
+			mtypef, mvalf := mtype.Field(i), mval.Field(i)
+			opts := tomlOptions(mtypef)
+			if opts.include && (!opts.omitempty || !isZero(mvalf)) {
+				val, err := e.valueToToml(mtypef.Type, mvalf)
+				if err != nil {
+					return nil, err
+				}
+				tval.Set(opts.name, opts.comment, opts.commented, val)
+			}
+		}
+	case reflect.Map:
+		for _, key := range mval.MapKeys() {
+			mvalf := mval.MapIndex(key)
+			val, err := e.valueToToml(mtype.Elem(), mvalf)
+			if err != nil {
+				return nil, err
+			}
+			if e.quoteMapKeys {
+				keyStr, err := tomlValueStringRepresentation(key.String())
+				if err != nil {
+					return nil, err
+				}
+				tval.SetPath([]string{keyStr}, "", false, val)
+			} else {
+				tval.Set(key.String(), "", false, val)
+			}
+		}
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of Toml trees
+func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+	tval := make([]*Tree, mval.Len(), mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := e.valueToTree(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal slice to slice of toml values
+func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	tval := make([]interface{}, mval.Len(), mval.Len())
+	for i := 0; i < mval.Len(); i++ {
+		val, err := e.valueToToml(mtype.Elem(), mval.Index(i))
+		if err != nil {
+			return nil, err
+		}
+		tval[i] = val
+	}
+	return tval, nil
+}
+
+// Convert given marshal value to toml value
+func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return e.valueToToml(mtype.Elem(), mval.Elem())
+	}
+	switch {
+	case isCustomMarshaler(mtype):
+		return callCustomMarshaler(mval)
+	case isTree(mtype):
+		return e.valueToTree(mtype, mval)
+	case isTreeSlice(mtype):
+		return e.valueToTreeSlice(mtype, mval)
+	case isOtherSlice(mtype):
+		return e.valueToOtherSlice(mtype, mval)
+	default:
+		switch mtype.Kind() {
+		case reflect.Bool:
+			return mval.Bool(), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return mval.Int(), nil
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return mval.Uint(), nil
+		case reflect.Float32, reflect.Float64:
+			return mval.Float(), nil
+		case reflect.String:
+			return mval.String(), nil
+		case reflect.Struct:
+			return mval.Interface().(time.Time), nil
+		default:
+			return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+		}
+	}
+}
+
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
+	d := Decoder{tval: t}
+	return d.unmarshal(v)
+}
+
+// Marshal returns the TOML encoding of Tree.
+// See Marshal() documentation for types mapping table.
+func (t *Tree) Marshal() ([]byte, error) {
+	var buf bytes.Buffer
+	err := NewEncoder(&buf).Encode(t)
+	return buf.Bytes(), err
+}
+
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+//   toml:"Field" Overrides the field's name to map to.
+//
+// See Marshal() documentation for types mapping table.
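+//
+// A short sketch (the Server type is illustrative):
+//
+//   type Server struct {
+//       Host string `toml:"host"`
+//       Port int    `toml:"port"`
+//   }
+//
+//   var s Server
+//   err := Unmarshal([]byte("host = \"example.com\"\nport = 8080"), &s)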
+func Unmarshal(data []byte, v interface{}) error {
+	t, err := LoadReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	return t.Unmarshal(v)
+}
+
+// Decoder reads and decodes TOML values from an input stream.
+type Decoder struct {
+	r    io.Reader
+	tval *Tree
+	encOpts
+}
+
+// NewDecoder returns a new decoder that reads from r.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		r:       r,
+		encOpts: encOptsDefaults,
+	}
+}
+
+// Decode reads a TOML-encoded value from its input
+// and unmarshals it into the value pointed to by v.
+//
+// See the documentation for Marshal for details.
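+//
+// A sketch of typical use (Config and r are illustrative):
+//
+//   var conf Config
+//   err := NewDecoder(r).Decode(&conf)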
+func (d *Decoder) Decode(v interface{}) error {
+	var err error
+	d.tval, err = LoadReader(d.r)
+	if err != nil {
+		return err
+	}
+	return d.unmarshal(v)
+}
+
+func (d *Decoder) unmarshal(v interface{}) error {
+	mtype := reflect.TypeOf(v)
+	if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
+		return errors.New("Only a pointer to struct can be unmarshaled from TOML")
+	}
+
+	sval, err := d.valueFromTree(mtype.Elem(), d.tval)
+	if err != nil {
+		return err
+	}
+	reflect.ValueOf(v).Elem().Set(sval)
+	return nil
+}
+
+// Convert toml tree to marshal struct or map, using marshal type
+func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return d.unwrapPointer(mtype, tval)
+	}
+	var mval reflect.Value
+	switch mtype.Kind() {
+	case reflect.Struct:
+		mval = reflect.New(mtype).Elem()
+		for i := 0; i < mtype.NumField(); i++ {
+			mtypef := mtype.Field(i)
+			opts := tomlOptions(mtypef)
+			if opts.include {
+				baseKey := opts.name
+				keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
+				for _, key := range keysToTry {
+					exists := tval.Has(key)
+					if !exists {
+						continue
+					}
+					val := tval.Get(key)
+					mvalf, err := d.valueFromToml(mtypef.Type, val)
+					if err != nil {
+						return mval, formatError(err, tval.GetPosition(key))
+					}
+					mval.Field(i).Set(mvalf)
+					break
+				}
+			}
+		}
+	case reflect.Map:
+		mval = reflect.MakeMap(mtype)
+		for _, key := range tval.Keys() {
+			// TODO: path splits key
+			val := tval.GetPath([]string{key})
+			mvalf, err := d.valueFromToml(mtype.Elem(), val)
+			if err != nil {
+				return mval, formatError(err, tval.GetPosition(key))
+			}
+			mval.SetMapIndex(reflect.ValueOf(key), mvalf)
+		}
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal struct/map slice, using marshal type
+func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
+	mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+	for i := 0; i < len(tval); i++ {
+		val, err := d.valueFromTree(mtype.Elem(), tval[i])
+		if err != nil {
+			return mval, err
+		}
+		mval.Index(i).Set(val)
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+	mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+	for i := 0; i < len(tval); i++ {
+		val, err := d.valueFromToml(mtype.Elem(), tval[i])
+		if err != nil {
+			return mval, err
+		}
+		mval.Index(i).Set(val)
+	}
+	return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type
+func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+	if mtype.Kind() == reflect.Ptr {
+		return d.unwrapPointer(mtype, tval)
+	}
+
+	switch tval.(type) {
+	case *Tree:
+		if isTree(mtype) {
+			return d.valueFromTree(mtype, tval.(*Tree))
+		} else {
+			return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval)
+		}
+	case []*Tree:
+		if isTreeSlice(mtype) {
+			return d.valueFromTreeSlice(mtype, tval.([]*Tree))
+		} else {
+			return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval)
+		}
+	case []interface{}:
+		if isOtherSlice(mtype) {
+			return d.valueFromOtherSlice(mtype, tval.([]interface{}))
+		} else {
+			return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval)
+		}
+	default:
+		switch mtype.Kind() {
+		case reflect.Bool:
+			val, ok := tval.(bool)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to bool", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.Int:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int(val)), nil
+		case reflect.Int8:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int8(val)), nil
+		case reflect.Int16:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int16(val)), nil
+		case reflect.Int32:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(int32(val)), nil
+		case reflect.Int64:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.Uint:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint(val)), nil
+		case reflect.Uint8:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint8(val)), nil
+		case reflect.Uint16:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint16(val)), nil
+		case reflect.Uint32:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint32(val)), nil
+		case reflect.Uint64:
+			val, ok := tval.(int64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+			}
+			return reflect.ValueOf(uint64(val)), nil
+		case reflect.Float32:
+			val, ok := tval.(float64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+			}
+			return reflect.ValueOf(float32(val)), nil
+		case reflect.Float64:
+			val, ok := tval.(float64)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.String:
+			val, ok := tval.(string)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to string", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		case reflect.Struct:
+			val, ok := tval.(time.Time)
+			if !ok {
+				return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to time", tval, tval)
+			}
+			return reflect.ValueOf(val), nil
+		default:
+			return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind())
+		}
+	}
+}
+
+func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+	val, err := d.valueFromToml(mtype.Elem(), tval)
+	if err != nil {
+		return reflect.ValueOf(nil), err
+	}
+	mval := reflect.New(mtype.Elem())
+	mval.Elem().Set(val)
+	return mval, nil
+}
+
+func tomlOptions(vf reflect.StructField) tomlOpts {
+	tag := vf.Tag.Get("toml")
+	parse := strings.Split(tag, ",")
+	var comment string
+	if c := vf.Tag.Get("comment"); c != "" {
+		comment = c
+	}
+	commented, _ := strconv.ParseBool(vf.Tag.Get("commented"))
+	result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false}
+	if parse[0] != "" {
+		if parse[0] == "-" && len(parse) == 1 {
+			result.include = false
+		} else {
+			result.name = strings.Trim(parse[0], " ")
+		}
+	}
+	if vf.PkgPath != "" {
+		result.include = false
+	}
+	if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
+		result.omitempty = true
+	}
+	if vf.Type.Kind() == reflect.Ptr {
+		result.omitempty = true
+	}
+	return result
+}
+
+func isZero(val reflect.Value) bool {
+	switch val.Type().Kind() {
+	case reflect.Map:
+		fallthrough
+	case reflect.Array:
+		fallthrough
+	case reflect.Slice:
+		return val.Len() == 0
+	default:
+		return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
+	}
+}
+
+func formatError(err error, pos Position) error {
+	if err.Error()[0] == '(' { // Error already contains position information
+		return err
+	}
+	return fmt.Errorf("%s: %s", pos, err)
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml
new file mode 100644
index 0000000000000000000000000000000000000000..1c5f98e7a8477b0b9c5add08b5aa3041a3161683
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml
@@ -0,0 +1,38 @@
+title = "TOML Marshal Testing"
+
+[basic]
+  bool = true
+  date = 1979-05-27T07:32:00Z
+  float = 123.4
+  int = 5000
+  string = "Bite me"
+  uint = 5001
+
+[basic_lists]
+  bools = [true,false,true]
+  dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+  floats = [12.3,45.6,78.9]
+  ints = [8001,8001,8002]
+  strings = ["One","Two","Three"]
+  uints = [5002,5003]
+
+[basic_map]
+  one = "one"
+  two = "two"
+
+[subdoc]
+
+  [subdoc.first]
+    name = "First"
+
+  [subdoc.second]
+    name = "Second"
+
+[[subdoclist]]
+  name = "List.First"
+
+[[subdoclist]]
+  name = "List.Second"
+
+[[subdocptrs]]
+  name = "Second"
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
new file mode 100644
index 0000000000000000000000000000000000000000..d492a1e6ffd449d3531eea50555987e08e653d13
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -0,0 +1,383 @@
+// TOML Parser.
+
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type tomlParser struct {
+	flowIdx       int
+	flow          []token
+	tree          *Tree
+	currentTable  []string
+	seenTableKeys []string
+}
+
+type tomlParserStateFn func() tomlParserStateFn
+
+// Formats an error message based on a token and panics with it.
+func (p *tomlParser) raiseError(tok *token, msg string, args ...interface{}) {
+	panic(tok.Position.String() + ": " + fmt.Sprintf(msg, args...))
+}
+
+func (p *tomlParser) run() {
+	for state := p.parseStart; state != nil; {
+		state = state()
+	}
+}
+
+func (p *tomlParser) peek() *token {
+	if p.flowIdx >= len(p.flow) {
+		return nil
+	}
+	return &p.flow[p.flowIdx]
+}
+
+func (p *tomlParser) assume(typ tokenType) {
+	tok := p.getToken()
+	if tok == nil {
+		p.raiseError(tok, "was expecting token %s, but token stream is empty", tok)
+	}
+	if tok.typ != typ {
+		p.raiseError(tok, "was expecting token %s, but got %s instead", typ, tok)
+	}
+}
+
+func (p *tomlParser) getToken() *token {
+	tok := p.peek()
+	if tok == nil {
+		return nil
+	}
+	p.flowIdx++
+	return tok
+}
+
+func (p *tomlParser) parseStart() tomlParserStateFn {
+	tok := p.peek()
+
+	// end of stream, parsing is finished
+	if tok == nil {
+		return nil
+	}
+
+	switch tok.typ {
+	case tokenDoubleLeftBracket:
+		return p.parseGroupArray
+	case tokenLeftBracket:
+		return p.parseGroup
+	case tokenKey:
+		return p.parseAssign
+	case tokenEOF:
+		return nil
+	default:
+		p.raiseError(tok, "unexpected token")
+	}
+	return nil
+}
+
+func (p *tomlParser) parseGroupArray() tomlParserStateFn {
+	startToken := p.getToken() // discard the [[
+	key := p.getToken()
+	if key.typ != tokenKeyGroupArray {
+		p.raiseError(key, "unexpected token %s, was expecting a table array key", key)
+	}
+
+	// get or create table array element at the indicated part in the path
+	keys, err := parseKey(key.val)
+	if err != nil {
+		p.raiseError(key, "invalid table array key: %s", err)
+	}
+	p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
+	destTree := p.tree.GetPath(keys)
+	var array []*Tree
+	if destTree == nil {
+		array = make([]*Tree, 0)
+	} else if target, ok := destTree.([]*Tree); ok && target != nil {
+		array = destTree.([]*Tree)
+	} else {
+		p.raiseError(key, "key %s is already assigned and not of type table array", key)
+	}
+	p.currentTable = keys
+
+	// add a new tree to the end of the table array
+	newTree := newTree()
+	newTree.position = startToken.Position
+	array = append(array, newTree)
+	p.tree.SetPath(p.currentTable, "", false, array)
+
+	// remove all keys that were children of this table array
+	prefix := key.val + "."
+	found := false
+	for ii := 0; ii < len(p.seenTableKeys); {
+		tableKey := p.seenTableKeys[ii]
+		if strings.HasPrefix(tableKey, prefix) {
+			p.seenTableKeys = append(p.seenTableKeys[:ii], p.seenTableKeys[ii+1:]...)
+		} else {
+			found = (tableKey == key.val)
+			ii++
+		}
+	}
+
+	// keep this key name from use by other kinds of assignments
+	if !found {
+		p.seenTableKeys = append(p.seenTableKeys, key.val)
+	}
+
+	// move to next parser state
+	p.assume(tokenDoubleRightBracket)
+	return p.parseStart
+}
+
+func (p *tomlParser) parseGroup() tomlParserStateFn {
+	startToken := p.getToken() // discard the [
+	key := p.getToken()
+	if key.typ != tokenKeyGroup {
+		p.raiseError(key, "unexpected token %s, was expecting a table key", key)
+	}
+	for _, item := range p.seenTableKeys {
+		if item == key.val {
+			p.raiseError(key, "duplicated tables")
+		}
+	}
+
+	p.seenTableKeys = append(p.seenTableKeys, key.val)
+	keys, err := parseKey(key.val)
+	if err != nil {
+		p.raiseError(key, "invalid table array key: %s", err)
+	}
+	if err := p.tree.createSubTree(keys, startToken.Position); err != nil {
+		p.raiseError(key, "%s", err)
+	}
+	p.assume(tokenRightBracket)
+	p.currentTable = keys
+	return p.parseStart
+}
+
+func (p *tomlParser) parseAssign() tomlParserStateFn {
+	key := p.getToken()
+	p.assume(tokenEqual)
+
+	value := p.parseRvalue()
+	var tableKey []string
+	if len(p.currentTable) > 0 {
+		tableKey = p.currentTable
+	} else {
+		tableKey = []string{}
+	}
+
+	// find the table to assign, looking out for arrays of tables
+	var targetNode *Tree
+	switch node := p.tree.GetPath(tableKey).(type) {
+	case []*Tree:
+		targetNode = node[len(node)-1]
+	case *Tree:
+		targetNode = node
+	default:
+		p.raiseError(key, "Unknown table type for path: %s",
+			strings.Join(tableKey, "."))
+	}
+
+	// assign value to the found table
+	keyVals, err := parseKey(key.val)
+	if err != nil {
+		p.raiseError(key, "%s", err)
+	}
+	if len(keyVals) != 1 {
+		p.raiseError(key, "Invalid key")
+	}
+	keyVal := keyVals[0]
+	localKey := []string{keyVal}
+	finalKey := append(tableKey, keyVal)
+	if targetNode.GetPath(localKey) != nil {
+		p.raiseError(key, "The following key was defined twice: %s",
+			strings.Join(finalKey, "."))
+	}
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *Tree, []*Tree:
+		toInsert = value
+	default:
+		toInsert = &tomlValue{value: value, position: key.Position}
+	}
+	targetNode.values[keyVal] = toInsert
+	return p.parseStart
+}
+
+var numberUnderscoreInvalidRegexp *regexp.Regexp
+
+func cleanupNumberToken(value string) (string, error) {
+	if numberUnderscoreInvalidRegexp.MatchString(value) {
+		return "", errors.New("invalid use of _ in number")
+	}
+	cleanedVal := strings.Replace(value, "_", "", -1)
+	return cleanedVal, nil
+}
+
+func (p *tomlParser) parseRvalue() interface{} {
+	tok := p.getToken()
+	if tok == nil || tok.typ == tokenEOF {
+		p.raiseError(tok, "expecting a value")
+	}
+
+	switch tok.typ {
+	case tokenString:
+		return tok.val
+	case tokenTrue:
+		return true
+	case tokenFalse:
+		return false
+	case tokenInteger:
+		cleanedVal, err := cleanupNumberToken(tok.val)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		val, err := strconv.ParseInt(cleanedVal, 10, 64)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenFloat:
+		cleanedVal, err := cleanupNumberToken(tok.val)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		val, err := strconv.ParseFloat(cleanedVal, 64)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenDate:
+		val, err := time.ParseInLocation(time.RFC3339Nano, tok.val, time.UTC)
+		if err != nil {
+			p.raiseError(tok, "%s", err)
+		}
+		return val
+	case tokenLeftBracket:
+		return p.parseArray()
+	case tokenLeftCurlyBrace:
+		return p.parseInlineTable()
+	case tokenEqual:
+		p.raiseError(tok, "cannot have multiple equals for the same key")
+	case tokenError:
+		p.raiseError(tok, "%s", tok)
+	}
+
+	p.raiseError(tok, "never reached")
+
+	return nil
+}
+
+func tokenIsComma(t *token) bool {
+	return t != nil && t.typ == tokenComma
+}
+
+func (p *tomlParser) parseInlineTable() *Tree {
+	tree := newTree()
+	var previous *token
+Loop:
+	for {
+		follow := p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated inline table")
+		}
+		switch follow.typ {
+		case tokenRightCurlyBrace:
+			p.getToken()
+			break Loop
+		case tokenKey:
+			if !tokenIsComma(previous) && previous != nil {
+				p.raiseError(follow, "comma expected between fields in inline table")
+			}
+			key := p.getToken()
+			p.assume(tokenEqual)
+			value := p.parseRvalue()
+			tree.Set(key.val, "", false, value)
+		case tokenComma:
+			if previous == nil {
+				p.raiseError(follow, "inline table cannot start with a comma")
+			}
+			if tokenIsComma(previous) {
+				p.raiseError(follow, "need field between two commas in inline table")
+			}
+			p.getToken()
+		default:
+			p.raiseError(follow, "unexpected token type in inline table: %s", follow.typ.String())
+		}
+		previous = follow
+	}
+	if tokenIsComma(previous) {
+		p.raiseError(previous, "trailing comma at the end of inline table")
+	}
+	return tree
+}
+
+func (p *tomlParser) parseArray() interface{} {
+	var array []interface{}
+	arrayType := reflect.TypeOf(nil)
+	for {
+		follow := p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated array")
+		}
+		if follow.typ == tokenRightBracket {
+			p.getToken()
+			break
+		}
+		val := p.parseRvalue()
+		if arrayType == nil {
+			arrayType = reflect.TypeOf(val)
+		}
+		if reflect.TypeOf(val) != arrayType {
+			p.raiseError(follow, "mixed types in array")
+		}
+		array = append(array, val)
+		follow = p.peek()
+		if follow == nil || follow.typ == tokenEOF {
+			p.raiseError(follow, "unterminated array")
+		}
+		if follow.typ != tokenRightBracket && follow.typ != tokenComma {
+			p.raiseError(follow, "missing comma")
+		}
+		if follow.typ == tokenComma {
+			p.getToken()
+		}
+	}
+	// An array of Trees is actually an array of inline
+	// tables, which is a shorthand for a table array. If the
+	// array was not converted from []interface{} to []*Tree,
+	// the two notations would not be equivalent.
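+	// For example, `points = [{x = 1}, {x = 2}]` should behave like
+	// two consecutive `[[points]]` tables.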
+	if arrayType == reflect.TypeOf(newTree()) {
+		tomlArray := make([]*Tree, len(array))
+		for i, v := range array {
+			tomlArray[i] = v.(*Tree)
+		}
+		return tomlArray
+	}
+	return array
+}
+
+func parseToml(flow []token) *Tree {
+	result := newTree()
+	result.position = Position{1, 1}
+	parser := &tomlParser{
+		flowIdx:       0,
+		flow:          flow,
+		tree:          result,
+		currentTable:  make([]string, 0),
+		seenTableKeys: make([]string, 0),
+	}
+	parser.run()
+	return result
+}
+
+func init() {
+	numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d]|_$|^_)`)
+}
diff --git a/vendor/github.com/pelletier/go-toml/position.go b/vendor/github.com/pelletier/go-toml/position.go
new file mode 100644
index 0000000000000000000000000000000000000000..c17bff87baaa6b2cf886e12b594b1dd263bd72aa
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/position.go
@@ -0,0 +1,29 @@
+// Position support for go-toml
+
+package toml
+
+import (
+	"fmt"
+)
+
+// Position of a document element within a TOML document.
+//
+// Line and Col are both 1-indexed positions for the element's line number and
+// column number, respectively. Values of zero or less will cause Invalid()
+// to return true.
+type Position struct {
+	Line int // line within the document
+	Col  int // column within the line
+}
+
+// String representation of the position.
+// Displays 1-indexed line and column numbers.
+func (p Position) String() string {
+	return fmt.Sprintf("(%d, %d)", p.Line, p.Col)
+}
+
+// Invalid returns whether the position is invalid, i.e. whether Line or Col
+// is zero or negative.
+func (p Position) Invalid() bool {
+	return p.Line <= 0 || p.Col <= 0
+}
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
new file mode 100755
index 0000000000000000000000000000000000000000..91a889670f0f8552fb6699b2af19f05838abc6e2
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/test.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# fail out of the script if anything here fails
+set -e
+
+# set GOPATH to the present working directory
+export GOPATH=$(pwd)
+
+function git_clone() {
+  path=$1
+  branch=$2
+  version=$3
+  if [ ! -d "src/$path" ]; then
+    mkdir -p src/$path
+    git clone https://$path.git src/$path
+  fi
+  pushd src/$path
+  git checkout "$branch"
+  git reset --hard "$version"
+  popd
+}
+
+# Remove potential previous runs
+rm -rf src test_program_bin toml-test
+
+# Run go vet
+go vet ./...
+
+go get github.com/pelletier/go-buffruneio
+go get github.com/davecgh/go-spew/spew
+go get gopkg.in/yaml.v2
+go get github.com/BurntSushi/toml
+
+# get code for BurntSushi TOML validation
+# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize)
+git_clone github.com/BurntSushi/toml master HEAD
+git_clone github.com/BurntSushi/toml-test master HEAD #was: 0.2.0 HEAD
+
+# build the BurntSushi test application
+go build -o toml-test github.com/BurntSushi/toml-test
+
+# vendorize the current lib for testing
+# NOTE: this basically mocks an install without having to go back out to github for code
+mkdir -p src/github.com/pelletier/go-toml/cmd
+mkdir -p src/github.com/pelletier/go-toml/query
+cp *.go *.toml src/github.com/pelletier/go-toml
+cp -R cmd/* src/github.com/pelletier/go-toml/cmd
+cp -R query/* src/github.com/pelletier/go-toml/query
+go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
+
+# Run basic unit tests
+go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out
+go test github.com/pelletier/go-toml/cmd/tomljson
+go test github.com/pelletier/go-toml/query
+
+# run the entire BurntSushi test suite
+if [[ $# -eq 0 ]] ; then
+  echo "Running all BurntSushi tests"
+  ./toml-test ./test_program_bin | tee test_out
+else
+  # run a specific test
+  test=$1
+  test_path='src/github.com/BurntSushi/toml-test/tests'
+  valid_test="$test_path/valid/$test"
+  invalid_test="$test_path/invalid/$test"
+
+  if [ -e "$valid_test.toml" ]; then
+    echo "Valid Test TOML for $test:"
+    echo "===="
+    cat "$valid_test.toml"
+
+    echo "Valid Test JSON for $test:"
+    echo "===="
+    cat "$valid_test.json"
+
+    echo "Go-TOML Output for $test:"
+    echo "===="
+    cat "$valid_test.toml" | ./test_program_bin
+  fi
+
+  if [ -e "$invalid_test.toml" ]; then
+    echo "Invalid Test TOML for $test:"
+    echo "===="
+    cat "$invalid_test.toml"
+
+    echo "Go-TOML Output for $test:"
+    echo "===="
+    echo "go-toml Output:"
+    cat "$invalid_test.toml" | ./test_program_bin
+  fi
+fi
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
new file mode 100644
index 0000000000000000000000000000000000000000..5581fe0bcc1d681d6879d60a04b60e4c941207d1
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -0,0 +1,140 @@
+package toml
+
+import (
+	"fmt"
+	"strconv"
+	"unicode"
+)
+
+// Define tokens
+type tokenType int
+
+const (
+	eof = -(iota + 1)
+)
+
+const (
+	tokenError tokenType = iota
+	tokenEOF
+	tokenComment
+	tokenKey
+	tokenString
+	tokenInteger
+	tokenTrue
+	tokenFalse
+	tokenFloat
+	tokenEqual
+	tokenLeftBracket
+	tokenRightBracket
+	tokenLeftCurlyBrace
+	tokenRightCurlyBrace
+	tokenLeftParen
+	tokenRightParen
+	tokenDoubleLeftBracket
+	tokenDoubleRightBracket
+	tokenDate
+	tokenKeyGroup
+	tokenKeyGroupArray
+	tokenComma
+	tokenColon
+	tokenDollar
+	tokenStar
+	tokenQuestion
+	tokenDot
+	tokenDotDot
+	tokenEOL
+)
+
+var tokenTypeNames = []string{
+	"Error",
+	"EOF",
+	"Comment",
+	"Key",
+	"String",
+	"Integer",
+	"True",
+	"False",
+	"Float",
+	"=",
+	"[",
+	"]",
+	"{",
+	"}",
+	"(",
+	")",
+	"]]",
+	"[[",
+	"Date",
+	"KeyGroup",
+	"KeyGroupArray",
+	",",
+	":",
+	"$",
+	"*",
+	"?",
+	".",
+	"..",
+	"EOL",
+}
+
+type token struct {
+	Position
+	typ tokenType
+	val string
+}
+
+func (tt tokenType) String() string {
+	idx := int(tt)
+	if idx < len(tokenTypeNames) {
+		return tokenTypeNames[idx]
+	}
+	return "Unknown"
+}
+
+func (t token) Int() int {
+	if result, err := strconv.Atoi(t.val); err != nil {
+		panic(err)
+	} else {
+		return result
+	}
+}
+
+func (t token) String() string {
+	switch t.typ {
+	case tokenEOF:
+		return "EOF"
+	case tokenError:
+		return t.val
+	}
+
+	return fmt.Sprintf("%q", t.val)
+}
+
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+func isAlphanumeric(r rune) bool {
+	return unicode.IsLetter(r) || r == '_'
+}
+
+func isKeyChar(r rune) bool {
+	// Keys start with the first character that isn't whitespace or [ and end
+	// with the last non-whitespace character before the equals sign. Keys
+	// cannot contain a # character."
+	return !(r == '\r' || r == '\n' || r == eof || r == '=')
+}
+
+func isKeyStartChar(r rune) bool {
+	return !(isSpace(r) || r == '\r' || r == '\n' || r == eof || r == '[')
+}
+
+func isDigit(r rune) bool {
+	return unicode.IsNumber(r)
+}
+
+func isHexDigit(r rune) bool {
+	return isDigit(r) ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3e324374a80f42db57be658cf05723bbaf20eef
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -0,0 +1,300 @@
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"runtime"
+	"strings"
+)
+
+type tomlValue struct {
+	value     interface{} // string, int64, uint64, float64, bool, time.Time, or a slice of any of these
+	comment   string
+	commented bool
+	position  Position
+}
+
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+	values    map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
+	comment   string
+	commented bool
+	position  Position
+}
+
+func newTree() *Tree {
+	return &Tree{
+		values:   make(map[string]interface{}),
+		position: Position{},
+	}
+}
+
+// TreeFromMap initializes a new Tree object using the given map.
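+//
+// For example (a sketch):
+//   tree, _ := TreeFromMap(map[string]interface{}{"port": 8080})
+//   tree.Get("port") // int64(8080)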
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
+	result, err := toTree(m)
+	if err != nil {
+		return nil, err
+	}
+	return result.(*Tree), nil
+}
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+	return t.position
+}
+
+// Has returns a boolean indicating if the given key exists.
+func (t *Tree) Has(key string) bool {
+	if key == "" {
+		return false
+	}
+	return t.HasPath(strings.Split(key, "."))
+}
+
+// HasPath returns true if the given path of keys exists, false otherwise.
+func (t *Tree) HasPath(keys []string) bool {
+	return t.GetPath(keys) != nil
+}
+
+// Keys returns the keys of the toplevel tree (does not recurse).
+func (t *Tree) Keys() []string {
+	keys := make([]string, len(t.values))
+	i := 0
+	for k := range t.values {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// Get the value at key in the Tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Returns nil if the path does not exist in the tree.
+// If key is an empty string, the current tree is returned.
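+// For example, given a tree parsed from
+//   [server]
+//   host = "example.com"
+// tree.Get("server.host") returns "example.com".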
+func (t *Tree) Get(key string) interface{} {
+	if key == "" {
+		return t
+	}
+	comps, err := parseKey(key)
+	if err != nil {
+		return nil
+	}
+	return t.GetPath(comps)
+}
+
+// GetPath returns the element in the tree indicated by 'keys'.
+// If keys is of length zero, the current tree is returned.
+func (t *Tree) GetPath(keys []string) interface{} {
+	if len(keys) == 0 {
+		return t
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return nil
+		}
+		switch node := value.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				return nil
+			}
+			subtree = node[len(node)-1]
+		default:
+			return nil // cannot navigate through other node types
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.value
+	default:
+		return node
+	}
+}
+
+// GetPosition returns the position of the given key.
+func (t *Tree) GetPosition(key string) Position {
+	if key == "" {
+		return t.position
+	}
+	return t.GetPositionPath(strings.Split(key, "."))
+}
+
+// GetPositionPath returns the position of the element in the tree indicated
+// by 'keys'. If keys is of length zero, the position of the current tree is
+// returned.
+func (t *Tree) GetPositionPath(keys []string) Position {
+	if len(keys) == 0 {
+		return t.position
+	}
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		value, exists := subtree.values[intermediateKey]
+		if !exists {
+			return Position{0, 0}
+		}
+		switch node := value.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				return Position{0, 0}
+			}
+			subtree = node[len(node)-1]
+		default:
+			return Position{0, 0}
+		}
+	}
+	// branch based on final node type
+	switch node := subtree.values[keys[len(keys)-1]].(type) {
+	case *tomlValue:
+		return node.position
+	case *Tree:
+		return node.position
+	case []*Tree:
+		// go to most recent element
+		if len(node) == 0 {
+			return Position{0, 0}
+		}
+		return node[len(node)-1].position
+	default:
+		return Position{0, 0}
+	}
+}
+
+// GetDefault works like Get but with a default value
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
+	val := t.Get(key)
+	if val == nil {
+		return def
+	}
+	return val
+}
+
+// Set an element in the tree.
+// Key is a dot-separated path (e.g. a.b.c).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) Set(key string, comment string, commented bool, value interface{}) {
+	t.SetPath(strings.Split(key, "."), comment, commented, value)
+}
+
+// SetPath sets an element in the tree.
+// Keys is an array of path elements (e.g. {"a","b","c"}).
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, comment string, commented bool, value interface{}) {
+	subtree := t
+	for _, intermediateKey := range keys[:len(keys)-1] {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			nextTree = newTree()
+			subtree.values[intermediateKey] = nextTree // add new element here
+		}
+		switch node := nextTree.(type) {
+		case *Tree:
+			subtree = node
+		case []*Tree:
+			// go to most recent element
+			if len(node) == 0 {
+				// create element if it does not exist
+				node = append(node, newTree())
+				subtree.values[intermediateKey] = node
+			}
+			subtree = node[len(node)-1]
+		}
+	}
+
+	var toInsert interface{}
+
+	switch value.(type) {
+	case *Tree:
+		tt := value.(*Tree)
+		tt.comment = comment
+		toInsert = value
+	case []*Tree:
+		toInsert = value
+	case *tomlValue:
+		tt := value.(*tomlValue)
+		tt.comment = comment
+		toInsert = tt
+	default:
+		toInsert = &tomlValue{value: value, comment: comment, commented: commented}
+	}
+
+	subtree.values[keys[len(keys)-1]] = toInsert
+}
+
+// createSubTree takes a tree and a key and creates the necessary intermediate
+// subtrees to create a subtree at that point. In-place.
+//
+// e.g. passing a.b.c will create (assuming tree is empty) tree[a], tree[a][b]
+// and tree[a][b][c]
+//
+// Returns nil on success, error object on failure
+func (t *Tree) createSubTree(keys []string, pos Position) error {
+	subtree := t
+	for _, intermediateKey := range keys {
+		nextTree, exists := subtree.values[intermediateKey]
+		if !exists {
+			tree := newTree()
+			tree.position = pos
+			subtree.values[intermediateKey] = tree
+			nextTree = tree
+		}
+
+		switch node := nextTree.(type) {
+		case []*Tree:
+			subtree = node[len(node)-1]
+		case *Tree:
+			subtree = node
+		default:
+			return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
+				strings.Join(keys, "."), intermediateKey, nextTree, nextTree)
+		}
+	}
+	return nil
+}
+
+// LoadBytes creates a Tree from a []byte.
+func LoadBytes(b []byte) (tree *Tree, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = errors.New(r.(string))
+		}
+	}()
+	tree = parseToml(lexToml(b))
+	return
+}
+
+// LoadReader creates a Tree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *Tree, err error) {
+	inputBytes, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return
+	}
+	tree, err = LoadBytes(inputBytes)
+	return
+}
+
+// Load creates a Tree from a string.
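+// For example (a sketch):
+//   tree, _ := Load("answer = 42")
+//   tree.Get("answer") // int64(42)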
+func Load(content string) (tree *Tree, err error) {
+	return LoadBytes([]byte(content))
+}
+
+// LoadFile creates a Tree from a file.
+func LoadFile(path string) (tree *Tree, err error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return LoadReader(file)
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
new file mode 100644
index 0000000000000000000000000000000000000000..79610e9b340c8a49512a2e94e29db5c8cbb7db09
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -0,0 +1,142 @@
+package toml
+
+import (
+	"fmt"
+	"reflect"
+	"time"
+)
+
+var kindToType = [reflect.String + 1]reflect.Type{
+	reflect.Bool:    reflect.TypeOf(true),
+	reflect.String:  reflect.TypeOf(""),
+	reflect.Float32: reflect.TypeOf(float64(1)),
+	reflect.Float64: reflect.TypeOf(float64(1)),
+	reflect.Int:     reflect.TypeOf(int64(1)),
+	reflect.Int8:    reflect.TypeOf(int64(1)),
+	reflect.Int16:   reflect.TypeOf(int64(1)),
+	reflect.Int32:   reflect.TypeOf(int64(1)),
+	reflect.Int64:   reflect.TypeOf(int64(1)),
+	reflect.Uint:    reflect.TypeOf(uint64(1)),
+	reflect.Uint8:   reflect.TypeOf(uint64(1)),
+	reflect.Uint16:  reflect.TypeOf(uint64(1)),
+	reflect.Uint32:  reflect.TypeOf(uint64(1)),
+	reflect.Uint64:  reflect.TypeOf(uint64(1)),
+}
+
+// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
+// supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+func typeFor(k reflect.Kind) reflect.Type {
+	if k > 0 && int(k) < len(kindToType) {
+		return kindToType[k]
+	}
+	return nil
+}
+
+func simpleValueCoercion(object interface{}) (interface{}, error) {
+	switch original := object.(type) {
+	case string, bool, int64, uint64, float64, time.Time:
+		return original, nil
+	case int:
+		return int64(original), nil
+	case int8:
+		return int64(original), nil
+	case int16:
+		return int64(original), nil
+	case int32:
+		return int64(original), nil
+	case uint:
+		return uint64(original), nil
+	case uint8:
+		return uint64(original), nil
+	case uint16:
+		return uint64(original), nil
+	case uint32:
+		return uint64(original), nil
+	case float32:
+		return float64(original), nil
+	case fmt.Stringer:
+		return original.String(), nil
+	default:
+		return nil, fmt.Errorf("cannot convert type %T to Tree", object)
+	}
+}
+
+func sliceToTree(object interface{}) (interface{}, error) {
+	// arrays are a bit tricky, since they can represent either a
+	// collection of simple values, which is represented by one
+	// *tomlValue, or an array of tables, which is represented by an
+	// array of *Tree.
+
+	// assumes this function is called from toTree only when value.Kind() is Array or Slice
+	value := reflect.ValueOf(object)
+	insideType := value.Type().Elem()
+	length := value.Len()
+	if length > 0 {
+		insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
+	}
+	if insideType.Kind() == reflect.Map {
+		// this is considered as an array of tables
+		tablesArray := make([]*Tree, 0, length)
+		for i := 0; i < length; i++ {
+			table := value.Index(i)
+			tree, err := toTree(table.Interface())
+			if err != nil {
+				return nil, err
+			}
+			tablesArray = append(tablesArray, tree.(*Tree))
+		}
+		return tablesArray, nil
+	}
+
+	sliceType := typeFor(insideType.Kind())
+	if sliceType == nil {
+		sliceType = insideType
+	}
+
+	arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
+
+	for i := 0; i < length; i++ {
+		val := value.Index(i).Interface()
+		simpleValue, err := simpleValueCoercion(val)
+		if err != nil {
+			return nil, err
+		}
+		arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+	}
+	return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
+}
+
+func toTree(object interface{}) (interface{}, error) {
+	value := reflect.ValueOf(object)
+
+	if value.Kind() == reflect.Map {
+		values := map[string]interface{}{}
+		keys := value.MapKeys()
+		for _, key := range keys {
+			if key.Kind() != reflect.String {
+				if _, ok := key.Interface().(string); !ok {
+					return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
+				}
+			}
+
+			v := value.MapIndex(key)
+			newValue, err := toTree(v.Interface())
+			if err != nil {
+				return nil, err
+			}
+			values[key.String()] = newValue
+		}
+		return &Tree{values: values, position: Position{}}, nil
+	}
+
+	if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
+		return sliceToTree(object)
+	}
+
+	simpleValue, err := simpleValueCoercion(object)
+	if err != nil {
+		return nil, err
+	}
+	return &tomlValue{value: simpleValue, position: Position{}}, nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
new file mode 100644
index 0000000000000000000000000000000000000000..449f35a441b34b7ecadcc8ec6d45433600256bda
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -0,0 +1,270 @@
+package toml
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// encodes a string to a TOML-compliant string value
+func encodeTomlString(value string) string {
+	var b bytes.Buffer
+
+	for _, rr := range value {
+		switch rr {
+		case '\b':
+			b.WriteString(`\b`)
+		case '\t':
+			b.WriteString(`\t`)
+		case '\n':
+			b.WriteString(`\n`)
+		case '\f':
+			b.WriteString(`\f`)
+		case '\r':
+			b.WriteString(`\r`)
+		case '"':
+			b.WriteString(`\"`)
+		case '\\':
+			b.WriteString(`\\`)
+		default:
+			// escape remaining control characters (U+0000 through U+001F);
+			// comparing the rune directly avoids truncating runes above U+FFFF
+			if rr <= 0x001F {
+				b.WriteString(fmt.Sprintf("\\u%0.4X", rr))
+			} else {
+				b.WriteRune(rr)
+			}
+		}
+	}
+	return b.String()
+}
+
+func tomlValueStringRepresentation(v interface{}) (string, error) {
+	switch value := v.(type) {
+	case uint64:
+		return strconv.FormatUint(value, 10), nil
+	case int64:
+		return strconv.FormatInt(value, 10), nil
+	case float64:
+		// Ensure a round float always contains a decimal point. Otherwise feeding
+		// the output back to the parser would convert it to an integer.
+		if math.Trunc(value) == value {
+			return strconv.FormatFloat(value, 'f', 1, 64), nil
+		}
+		// use 64-bit precision: value is a float64, and 32 would round it
+		return strconv.FormatFloat(value, 'f', -1, 64), nil
+	case string:
+		return "\"" + encodeTomlString(value) + "\"", nil
+	case []byte:
+		return tomlValueStringRepresentation(string(value))
+	case bool:
+		if value {
+			return "true", nil
+		}
+		return "false", nil
+	case time.Time:
+		return value.Format(time.RFC3339), nil
+	case nil:
+		return "", nil
+	}
+
+	rv := reflect.ValueOf(v)
+
+	if rv.Kind() == reflect.Slice {
+		values := []string{}
+		for i := 0; i < rv.Len(); i++ {
+			item := rv.Index(i).Interface()
+			itemRepr, err := tomlValueStringRepresentation(item)
+			if err != nil {
+				return "", err
+			}
+			values = append(values, itemRepr)
+		}
+		return "[" + strings.Join(values, ",") + "]", nil
+	}
+	return "", fmt.Errorf("unsupported value type %T: %v", v, v)
+}
+
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
+	simpleValuesKeys := make([]string, 0)
+	complexValuesKeys := make([]string, 0)
+
+	for k := range t.values {
+		v := t.values[k]
+		switch v.(type) {
+		case *Tree, []*Tree:
+			complexValuesKeys = append(complexValuesKeys, k)
+		default:
+			simpleValuesKeys = append(simpleValuesKeys, k)
+		}
+	}
+
+	sort.Strings(simpleValuesKeys)
+	sort.Strings(complexValuesKeys)
+
+	for _, k := range simpleValuesKeys {
+		v, ok := t.values[k].(*tomlValue)
+		if !ok {
+			return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+		}
+
+		repr, err := tomlValueStringRepresentation(v.value)
+		if err != nil {
+			return bytesCount, err
+		}
+
+		if v.comment != "" {
+			comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+			start := "# "
+			if strings.HasPrefix(comment, "#") {
+				start = ""
+			}
+			writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
+			bytesCount += int64(writtenBytesCountComment)
+			if errc != nil {
+				return bytesCount, errc
+			}
+		}
+
+		var commented string
+		if v.commented {
+			commented = "# "
+		}
+		writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
+		bytesCount += int64(writtenBytesCount)
+		if err != nil {
+			return bytesCount, err
+		}
+	}
+
+	for _, k := range complexValuesKeys {
+		v := t.values[k]
+
+		combinedKey := k
+		if keyspace != "" {
+			combinedKey = keyspace + "." + combinedKey
+		}
+		var commented string
+		if t.commented {
+			commented = "# "
+		}
+
+		switch node := v.(type) {
+		// node has to be of those two types given how keys are sorted above
+		case *Tree:
+			tv, ok := t.values[k].(*Tree)
+			if !ok {
+				return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+			}
+			if tv.comment != "" {
+				comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+				start := "# "
+				if strings.HasPrefix(comment, "#") {
+					start = ""
+				}
+				writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+				bytesCount += int64(writtenBytesCountComment)
+				if errc != nil {
+					return bytesCount, errc
+				}
+			}
+			writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
+			bytesCount += int64(writtenBytesCount)
+			if err != nil {
+				return bytesCount, err
+			}
+			bytesCount, err = node.writeTo(w, indent+"  ", combinedKey, bytesCount)
+			if err != nil {
+				return bytesCount, err
+			}
+		case []*Tree:
+			for _, subTree := range node {
+				writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+				bytesCount += int64(writtenBytesCount)
+				if err != nil {
+					return bytesCount, err
+				}
+
+				bytesCount, err = subTree.writeTo(w, indent+"  ", combinedKey, bytesCount)
+				if err != nil {
+					return bytesCount, err
+				}
+			}
+		}
+	}
+
+	return bytesCount, nil
+}
+
+func writeStrings(w io.Writer, s ...string) (int, error) {
+	var n int
+	for i := range s {
+		b, err := io.WriteString(w, s[i])
+		n += b
+		if err != nil {
+			return n, err
+		}
+	}
+	return n, nil
+}
+
+// WriteTo encodes the Tree as TOML and writes it to the writer w.
+// It returns the number of bytes written on success, or an error if the write failed.
+func (t *Tree) WriteTo(w io.Writer) (int64, error) {
+	return t.writeTo(w, "", "", 0)
+}
+
+// ToTomlString generates a human-readable representation of the current tree.
+// Output spans multiple lines, and is suitable for ingest by a TOML parser.
+// If the conversion cannot be performed, ToTomlString returns a non-nil error.
+func (t *Tree) ToTomlString() (string, error) {
+	var buf bytes.Buffer
+	_, err := t.WriteTo(&buf)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+// String generates a human-readable representation of the current tree.
+// It is equivalent to ToTomlString with the error discarded, and is present to
+// implement the fmt.Stringer interface.
+func (t *Tree) String() string {
+	result, _ := t.ToTomlString()
+	return result
+}
+
+// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used:
+//
+//	* bool
+//	* float64
+//	* int64
+//	* string
+//	* uint64
+//	* time.Time
+//	* map[string]interface{} (where interface{} is any of this list)
+//	* []interface{} (where interface{} is any of this list)
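+//
+// For example, a tree parsed from `port = 8080` maps to
+// map[string]interface{}{"port": int64(8080)}.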
+func (t *Tree) ToMap() map[string]interface{} {
+	result := map[string]interface{}{}
+
+	for k, v := range t.values {
+		switch node := v.(type) {
+		case []*Tree:
+			var array []interface{}
+			for _, item := range node {
+				array = append(array, item.ToMap())
+			}
+			result[k] = array
+		case *Tree:
+			result[k] = node.ToMap()
+		case *tomlValue:
+			result[k] = node.value
+		}
+	}
+	return result
+}
diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..298f0e2665e512a7d5053faf2ce4793c281efe6a
--- /dev/null
+++ b/vendor/github.com/spf13/afero/LICENSE.txt
@@ -0,0 +1,174 @@
+                                Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0c9b04b53fcb64d5f5e9c3095c5d3a7745db1f41
--- /dev/null
+++ b/vendor/github.com/spf13/afero/README.md
@@ -0,0 +1,452 @@
+![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png)
+
+A FileSystem Abstraction System for Go
+
+[![Build Status](https://travis-ci.org/spf13/afero.svg)](https://travis-ci.org/spf13/afero) [![Build status](https://ci.appveyor.com/api/projects/status/github/spf13/afero?branch=master&svg=true)](https://ci.appveyor.com/project/spf13/afero) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+# Overview
+
+Afero is a filesystem framework providing a simple, uniform and universal API
+for interacting with any filesystem, as an abstraction layer providing
+interfaces, types and methods. Afero has an exceptionally clean interface and
+simple design without needless constructors or initialization methods.
+
+Afero is also a library providing a base set of interoperable backend
+filesystems that make it easy to work with afero while retaining all the power
+and benefit of the os and ioutil packages.
+
+Afero provides significant improvements over using the os package alone, most
+notably the ability to create mock and testing filesystems without relying on the disk.
+
+It is suitable for use in any situation where you would consider using the OS
+package as it provides an additional abstraction that makes it easy to use a
+memory backed file system during testing. It also adds support for the http
+filesystem for full interoperability.
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Use it as a wrapper for the OS packages.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing.
+
+## Step 1: Install Afero
+
+First use go get to install the latest version of the library.
+
+    $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a pointer to a filesystem.
+```go
+var AppFs = afero.NewMemMapFs()
+
+// or
+
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the composite literal you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem but will reduce
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if my application before had:
+```go
+os.Open("/tmp/foo")
+```
+We would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` being the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
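+
+A minimal sketch of that pattern (the package name `appfs` is illustrative):
+
+```go
+// Package appfs exports the filesystem used by the rest of the application.
+package appfs
+
+import "github.com/spf13/afero"
+
+// Fs defaults to the real OS filesystem; tests can swap in afero.NewMemMapFs().
+var Fs afero.Fs = afero.NewOsFs()
+```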
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero)
+
+They can be used in two different ways. You can either call
+them directly, where the first parameter of each function will be the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs,"", "ioutil-test")
+
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and is easily
+reproducible regardless of OS. You could create files to your heart’s content
+and the file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoid security issues and permissions
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this I would define my `appFS = afero.NewOsFs()` somewhere
+appropriate in my application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in my tests I would initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+	appFS := afero.NewMemMapFs()
+	// create test files and directories
+	appFS.MkdirAll("src/a", 0755)
+	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+	name := "src/c"
+	_, err := appFS.Stat(name)
+	if os.IsNotExist(err) {
+		t.Errorf("file \"%s\" does not exist.\n", name)
+	}
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755))
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem perfect for use in
+mocking and for avoiding unnecessary disk I/O when persistence isn’t
+necessary. It is fully concurrent and will work safely within
+goroutines.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755))
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (sftp), which
+can be used to perform file operations over an encrypted channel.
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+The given file name to the operations on this Fs will be prepended with
+the base path before calling the source Fs.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
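+
+Given the prepending behaviour described above, a call such as the following
+(a sketch) would operate on `/base/path/file.txt` on the underlying OsFs:
+
+```go
+f, err := bp.Create("/file.txt") // actually creates /base/path/file.txt
+```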
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names, any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The net/http package requires a slightly specific version of Open which
+returns an http.File type.
+
+Afero provides an httpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an httpFs.
+
+```go
+httpFs := afero.NewHttpFs(<ExistingFS>)
+fileserver := http.FileServer(httpFs.Dir(<PATH>))
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request occurs within the cache duration of when it was
+created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer. Write calls to open file
+handles like `Write()` or `Truncate()` go to the overlay first.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Files are cached in the layer for the given time.Duration; a cache duration of 0
+means "forever", i.e. the file will never be re-requested from the base.
+
+A read-only base will make the overlay also read-only but still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
+
+### CopyOnWriteFs()
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and Renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+	base := afero.NewOsFs()
+	roBase := afero.NewReadOnlyFs(base)
+	ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+	fh, _ := ufs.Create("/home/test/file2.txt")
+	fh.WriteString("This is a test")
+	fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs)
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* ZIP
+* TAR
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō" making "make or do".
+
+The literal meaning of afero is "to make" or "to do" which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+* **0.10.0** 2015.12.10
+  * Full compatibility with Windows
+  * Introduction of afero utilities
+  * Test suite rewritten to work cross platform
+  * Normalize paths for MemMapFs
+  * Adding Sync to the file interface
+  * **Breaking Change** Walk and ReadDir have changed parameter order
+  * Moving types used by MemMapFs to a subpackage
+  * General bugfixes and improvements
+* **0.9.0** 2015.11.05
+  * New Walk function similar to filepath.Walk
+  * MemMapFs.OpenFile handles O_CREATE, O_APPEND, O_TRUNC
+  * MemMapFs.Remove now really deletes the file
+  * InMemoryFile.Readdir and Readdirnames work correctly
+  * InMemoryFile functions lock it for concurrent access
+  * Test suite improvements
+* **0.8.0** 2014.10.28
+  * First public version
+  * Interfaces feel ready for people to build using
+  * Interfaces satisfy all known uses
+  * MemMapFs passes the majority of the OS test suite
+  * OsFs passes the majority of the OS test suite
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt)
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 0000000000000000000000000000000000000000..f5b5e127cd6a793196506d007454eb8b244c247e
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,108 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package afero provides types and methods for interacting with the filesystem,
+// as an abstraction layer.
+//
+// Afero also provides a few implementations that are mostly interoperable. One that
+// uses the operating system filesystem, one that uses memory to store files
+// (cross platform) and an interface that should be implemented if you want to
+// provide your own filesystem.
+package afero
+
+import (
+	"errors"
+	"io"
+	"os"
+	"time"
+)
+
+type Afero struct {
+	Fs
+}
+
+// File represents a file in the filesystem.
+type File interface {
+	io.Closer
+	io.Reader
+	io.ReaderAt
+	io.Seeker
+	io.Writer
+	io.WriterAt
+
+	Name() string
+	Readdir(count int) ([]os.FileInfo, error)
+	Readdirnames(n int) ([]string, error)
+	Stat() (os.FileInfo, error)
+	Sync() error
+	Truncate(size int64) error
+	WriteString(s string) (ret int, err error)
+}
+
+// Fs is the filesystem interface.
+//
+// Any simulated or real filesystem should implement this interface.
+type Fs interface {
+	// Create creates a file in the filesystem, returning the file and an
+	// error, if any happens.
+	Create(name string) (File, error)
+
+	// Mkdir creates a directory in the filesystem, returning an error if any
+	// happens.
+	Mkdir(name string, perm os.FileMode) error
+
+	// MkdirAll creates a directory path and all parents that do not exist
+	// yet.
+	MkdirAll(path string, perm os.FileMode) error
+
+	// Open opens a file, returning it or an error, if any happens.
+	Open(name string) (File, error)
+
+	// OpenFile opens a file using the given flags and the given mode.
+	OpenFile(name string, flag int, perm os.FileMode) (File, error)
+
+	// Remove removes a file identified by name, returning an error, if any
+	// happens.
+	Remove(name string) error
+
+	// RemoveAll removes a directory path and any children it contains. It
+	// does not fail if the path does not exist (returns nil).
+	RemoveAll(path string) error
+
+	// Rename renames a file.
+	Rename(oldname, newname string) error
+
+	// Stat returns a FileInfo describing the named file, or an error, if any
+	// happens.
+	Stat(name string) (os.FileInfo, error)
+
+	// Name returns the name of this FileSystem.
+	Name() string
+
+	// Chmod changes the mode of the named file to mode.
+	Chmod(name string, mode os.FileMode) error
+
+	// Chtimes changes the access and modification times of the named file.
+	Chtimes(name string, atime time.Time, mtime time.Time) error
+}
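+
+// For example, a helper written against Fs runs unchanged on any backend
+// (a sketch; the path and contents here are illustrative):
+//
+//	func writeGreeting(fs Fs) error {
+//		f, err := fs.Create("/tmp/greeting.txt")
+//		if err != nil {
+//			return err
+//		}
+//		defer f.Close()
+//		_, err = f.WriteString("hello")
+//		return err
+//	}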
+
+var (
+	ErrFileClosed        = errors.New("File is closed")
+	ErrOutOfRange        = errors.New("Out of range")
+	ErrTooLarge          = errors.New("Too large")
+	ErrFileNotFound      = os.ErrNotExist
+	ErrFileExists        = os.ErrExist
+	ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a633ad500c16861a1de69026745675de2515ace5
--- /dev/null
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -0,0 +1,15 @@
+version: '{build}'
+clone_folder: C:\gopath\src\github.com\spf13\afero
+environment:
+  GOPATH: C:\gopath
+build_script:
+- cmd: >-
+    go version
+
+    go env
+
+    go get -v github.com/spf13/afero/...
+
+    go build github.com/spf13/afero
+test_script:
+- cmd: go test -race -v github.com/spf13/afero/...
diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go
new file mode 100644
index 0000000000000000000000000000000000000000..5e4fc2ec055e5fde456ee5ccb41c04d7861e2a1c
--- /dev/null
+++ b/vendor/github.com/spf13/afero/basepath.go
@@ -0,0 +1,145 @@
+package afero
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// The BasePathFs restricts all operations to a given path within an Fs.
+// File names passed to operations on this Fs are prepended with the base
+// path before the call is forwarded to the source Fs. Any file name (after
+// filepath.Clean()) outside this base path is treated as a non-existing file.
+//
+// Note that it does not clean the error messages on return, so errors may
+// reveal the real path.
+type BasePathFs struct {
+	source Fs
+	path   string
+}
+
+func NewBasePathFs(source Fs, path string) Fs {
+	return &BasePathFs{source: source, path: path}
+}
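+
+// For example, to confine all operations to a single directory tree
+// (a sketch; the base directory is illustrative):
+//
+//	jailed := NewBasePathFs(NewOsFs(), "/var/data")
+//	// Opens /var/data/reports/q1.txt on the underlying OsFs.
+//	f, err := jailed.Open("/reports/q1.txt")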
+
+// RealPath returns the given file name with the base path prepended; for a
+// file outside the base path it returns the name unchanged and an error.
+func (b *BasePathFs) RealPath(name string) (path string, err error) {
+	if err := validateBasePathName(name); err != nil {
+		return "", err
+	}
+
+	bpath := filepath.Clean(b.path)
+	path = filepath.Clean(filepath.Join(bpath, name))
+	if !strings.HasPrefix(path, bpath) {
+		return name, os.ErrNotExist
+	}
+
+	return path, nil
+}
+
+func validateBasePathName(name string) error {
+	if runtime.GOOS != "windows" {
+		// Not much to do here;
+		// the virtual file paths all look absolute on *nix.
+		return nil
+	}
+
+	// On Windows a common mistake would be to provide an absolute OS path.
+	// We could strip out the base part, but that would not be very portable.
+	if filepath.IsAbs(name) {
+		return &os.PathError{Op: "realPath", Path: name, Err: errors.New("got a real OS path instead of a virtual")}
+	}
+
+	return nil
+}
+
+func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "chtimes", Path: name, Err: err}
+	}
+	return b.source.Chtimes(name, atime, mtime)
+}
+
+func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "chmod", Path: name, Err: err}
+	}
+	return b.source.Chmod(name, mode)
+}
+
+func (b *BasePathFs) Name() string {
+	return "BasePathFs"
+}
+
+func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "stat", Path: name, Err: err}
+	}
+	return b.source.Stat(name)
+}
+
+func (b *BasePathFs) Rename(oldname, newname string) (err error) {
+	if oldname, err = b.RealPath(oldname); err != nil {
+		return &os.PathError{Op: "rename", Path: oldname, Err: err}
+	}
+	if newname, err = b.RealPath(newname); err != nil {
+		return &os.PathError{Op: "rename", Path: newname, Err: err}
+	}
+	return b.source.Rename(oldname, newname)
+}
+
+func (b *BasePathFs) RemoveAll(name string) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "remove_all", Path: name, Err: err}
+	}
+	return b.source.RemoveAll(name)
+}
+
+func (b *BasePathFs) Remove(name string) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "remove", Path: name, Err: err}
+	}
+	return b.source.Remove(name)
+}
+
+func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
+	}
+	return b.source.OpenFile(name, flag, mode)
+}
+
+func (b *BasePathFs) Open(name string) (f File, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "open", Path: name, Err: err}
+	}
+	return b.source.Open(name)
+}
+
+func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	return b.source.Mkdir(name, mode)
+}
+
+func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	return b.source.MkdirAll(name, mode)
+}
+
+func (b *BasePathFs) Create(name string) (f File, err error) {
+	if name, err = b.RealPath(name); err != nil {
+		return nil, &os.PathError{Op: "create", Path: name, Err: err}
+	}
+	return b.source.Create(name)
+}
+
+// vim: ts=4 sw=4 noexpandtab nolist syn=go
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
new file mode 100644
index 0000000000000000000000000000000000000000..b026e0de838d85f12d4c2d341bf567d2200e2c05
--- /dev/null
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -0,0 +1,290 @@
+package afero
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+// If the cache duration is 0, cache time will be unlimited, i.e. once
+// a file is in the layer, the base will never be read again for this file.
+//
+// For cache times greater than 0, the modification time of a file is
+// checked. Note that a lot of file system implementations only allow a
+// resolution of a second for timestamps... or as the godoc for os.Chtimes()
+// states: "The underlying filesystem may truncate or round the values to a
+// less precise time unit."
+//
+// This caching union will also forward all write calls to the base file
+// system first. To prevent writing to the base Fs, wrap it in a read-only
+// filter. Note: this also makes the overlay read-only; to write files in
+// the overlay, use the overlay Fs directly, not via the union Fs.
+type CacheOnReadFs struct {
+	base      Fs
+	layer     Fs
+	cacheTime time.Duration
+}
+
+func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
+	return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
+}
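+
+// For example, to cache reads from a slow base Fs in memory for one minute
+// (a sketch; the file name is illustrative):
+//
+//	base := NewOsFs()
+//	layer := NewMemMapFs()
+//	cached := NewCacheOnReadFs(base, layer, time.Minute)
+//	data, err := ReadFile(cached, "config.json")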
+
+type cacheState int
+
+const (
+	// not present in the overlay, unknown if it exists in the base:
+	cacheMiss cacheState = iota
+	// present in the overlay and in base, base file is newer:
+	cacheStale
+	// present in the overlay - with cache time == 0 it may exist in the base,
+	// with cacheTime > 0 it exists in the base and is the same age or newer
+	// in the overlay
+	cacheHit
+	// happens if someone writes directly to the overlay without
+	// going through this union
+	cacheLocal
+)
+
+func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
+	var lfi, bfi os.FileInfo
+	lfi, err = u.layer.Stat(name)
+	if err == nil {
+		if u.cacheTime == 0 {
+			return cacheHit, lfi, nil
+		}
+		if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
+			bfi, err = u.base.Stat(name)
+			if err != nil {
+				return cacheLocal, lfi, nil
+			}
+			if bfi.ModTime().After(lfi.ModTime()) {
+				return cacheStale, bfi, nil
+			}
+		}
+		return cacheHit, lfi, nil
+	}
+
+	if err == syscall.ENOENT || os.IsNotExist(err) {
+		return cacheMiss, nil, nil
+	}
+
+	return cacheMiss, nil, err
+}
+
+func (u *CacheOnReadFs) copyToLayer(name string) error {
+	return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit:
+		err = u.base.Chtimes(name, atime, mtime)
+	case cacheStale, cacheMiss:
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+		err = u.base.Chtimes(name, atime, mtime)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit:
+		err = u.base.Chmod(name, mode)
+	case cacheStale, cacheMiss:
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+		err = u.base.Chmod(name, mode)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Chmod(name, mode)
+}
+
+func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
+	st, fi, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+	switch st {
+	case cacheMiss:
+		return u.base.Stat(name)
+	default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo
+		return fi, nil
+	}
+}
+
+func (u *CacheOnReadFs) Rename(oldname, newname string) error {
+	st, _, err := u.cacheStatus(oldname)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit:
+		err = u.base.Rename(oldname, newname)
+	case cacheStale, cacheMiss:
+		if err := u.copyToLayer(oldname); err != nil {
+			return err
+		}
+		err = u.base.Rename(oldname, newname)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Rename(oldname, newname)
+}
+
+func (u *CacheOnReadFs) Remove(name string) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit, cacheStale, cacheMiss:
+		err = u.base.Remove(name)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.Remove(name)
+}
+
+func (u *CacheOnReadFs) RemoveAll(name string) error {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return err
+	}
+	switch st {
+	case cacheLocal:
+	case cacheHit, cacheStale, cacheMiss:
+		err = u.base.RemoveAll(name)
+	}
+	if err != nil {
+		return err
+	}
+	return u.layer.RemoveAll(name)
+}
+
+func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	st, _, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+	switch st {
+	case cacheLocal, cacheHit:
+	default:
+		if err := u.copyToLayer(name); err != nil {
+			return nil, err
+		}
+	}
+	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		bfi, err := u.base.OpenFile(name, flag, perm)
+		if err != nil {
+			return nil, err
+		}
+		lfi, err := u.layer.OpenFile(name, flag, perm)
+		if err != nil {
+			bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
+			return nil, err
+		}
+		return &UnionFile{base: bfi, layer: lfi}, nil
+	}
+	return u.layer.OpenFile(name, flag, perm)
+}
+
+func (u *CacheOnReadFs) Open(name string) (File, error) {
+	st, fi, err := u.cacheStatus(name)
+	if err != nil {
+		return nil, err
+	}
+
+	switch st {
+	case cacheLocal:
+		return u.layer.Open(name)
+
+	case cacheMiss:
+		bfi, err := u.base.Stat(name)
+		if err != nil {
+			return nil, err
+		}
+		if bfi.IsDir() {
+			return u.base.Open(name)
+		}
+		if err := u.copyToLayer(name); err != nil {
+			return nil, err
+		}
+		return u.layer.Open(name)
+
+	case cacheStale:
+		if !fi.IsDir() {
+			if err := u.copyToLayer(name); err != nil {
+				return nil, err
+			}
+			return u.layer.Open(name)
+		}
+	case cacheHit:
+		if !fi.IsDir() {
+			return u.layer.Open(name)
+		}
+	}
+	// the dirs from cacheHit, cacheStale fall down here:
+	bfile, _ := u.base.Open(name)
+	lfile, err := u.layer.Open(name)
+	if err != nil && bfile == nil {
+		return nil, err
+	}
+	return &UnionFile{base: bfile, layer: lfile}, nil
+}
+
+func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
+	err := u.base.Mkdir(name, perm)
+	if err != nil {
+		return err
+	}
+	return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
+}
+
+func (u *CacheOnReadFs) Name() string {
+	return "CacheOnReadFs"
+}
+
+func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
+	err := u.base.MkdirAll(name, perm)
+	if err != nil {
+		return err
+	}
+	return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CacheOnReadFs) Create(name string) (File, error) {
+	bfh, err := u.base.Create(name)
+	if err != nil {
+		return nil, err
+	}
+	lfh, err := u.layer.Create(name)
+	if err != nil {
+		// oops, see the comment about O_TRUNC above; should we remove? Then we'd
+		// have to remember whether the file existed before.
+		bfh.Close()
+		return nil, err
+	}
+	return &UnionFile{base: bfh, layer: lfh}, nil
+}
diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go
new file mode 100644
index 0000000000000000000000000000000000000000..5728243d962ddc68f58aaba3c21d016f3444c8a4
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_bsds.go
@@ -0,0 +1,22 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build darwin openbsd freebsd netbsd dragonfly
+
+package afero
+
+import (
+	"syscall"
+)
+
+const BADFD = syscall.EBADF
diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go
new file mode 100644
index 0000000000000000000000000000000000000000..968fc2783e5e89de05143d7c213b964f2665d97d
--- /dev/null
+++ b/vendor/github.com/spf13/afero/const_win_unix.go
@@ -0,0 +1,25 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !darwin
+// +build !openbsd
+// +build !freebsd
+// +build !dragonfly
+// +build !netbsd
+
+package afero
+
+import (
+	"syscall"
+)
+
+const BADFD = syscall.EBADFD
diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go
new file mode 100644
index 0000000000000000000000000000000000000000..f2ebcd2266e56f8c1bcfa7481f6f585e34c0ca4b
--- /dev/null
+++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go
@@ -0,0 +1,253 @@
+package afero
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+)
+
+// The CopyOnWriteFs is a union filesystem: a read-only base file system with
+// a possibly writable layer on top. Changes to the file system will only
+// be made in the overlay: changing an existing file in the base layer which
+// is not present in the overlay will copy the file to the overlay ("changing"
+// also includes calls to e.g. Chtimes() and Chmod()).
+//
+// Reading directories is currently only supported via Open(), not OpenFile().
+type CopyOnWriteFs struct {
+	base  Fs
+	layer Fs
+}
+
+func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
+	return &CopyOnWriteFs{base: base, layer: layer}
+}
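+
+// For example, to keep an on-disk tree untouched while capturing all
+// changes in memory (a sketch; the file name is illustrative):
+//
+//	ufs := NewCopyOnWriteFs(NewOsFs(), NewMemMapFs())
+//	// The write lands in the overlay; the base stays untouched.
+//	err := WriteFile(ufs, "notes.txt", []byte("draft"), 0644)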
+
+// isBaseFile returns true if the file is not present in the overlay.
+func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
+	if _, err := u.layer.Stat(name); err == nil {
+		return false, nil
+	}
+	_, err := u.base.Stat(name)
+	if err != nil {
+		if oerr, ok := err.(*os.PathError); ok {
+			if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR {
+				return false, nil
+			}
+		}
+		if err == syscall.ENOENT {
+			return false, nil
+		}
+	}
+	return true, err
+}
+
+func (u *CopyOnWriteFs) copyToLayer(name string) error {
+	return copyToLayer(u.base, u.layer, name)
+}
+
+func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return err
+	}
+	if b {
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+	}
+	return u.layer.Chtimes(name, atime, mtime)
+}
+
+func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return err
+	}
+	if b {
+		if err := u.copyToLayer(name); err != nil {
+			return err
+		}
+	}
+	return u.layer.Chmod(name, mode)
+}
+
+func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
+	fi, err := u.layer.Stat(name)
+	if err != nil {
+		origErr := err
+		if e, ok := err.(*os.PathError); ok {
+			err = e.Err
+		}
+		if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
+			return u.base.Stat(name)
+		}
+		return nil, origErr
+	}
+	return fi, nil
+}
+
+// Renaming files present only in the base layer is not permitted
+func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
+	b, err := u.isBaseFile(oldname)
+	if err != nil {
+		return err
+	}
+	if b {
+		return syscall.EPERM
+	}
+	return u.layer.Rename(oldname, newname)
+}
+
+// Removing files present only in the base layer is not permitted. If
+// a file is present in the base layer and the overlay, only the overlay
+// will be removed.
+func (u *CopyOnWriteFs) Remove(name string) error {
+	err := u.layer.Remove(name)
+	switch err {
+	case syscall.ENOENT:
+		_, err = u.base.Stat(name)
+		if err == nil {
+			return syscall.EPERM
+		}
+		return syscall.ENOENT
+	default:
+		return err
+	}
+}
+
+func (u *CopyOnWriteFs) RemoveAll(name string) error {
+	err := u.layer.RemoveAll(name)
+	switch err {
+	case syscall.ENOENT:
+		_, err = u.base.Stat(name)
+		if err == nil {
+			return syscall.EPERM
+		}
+		return syscall.ENOENT
+	default:
+		return err
+	}
+}
+
+func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return nil, err
+	}
+
+	if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		if b {
+			if err = u.copyToLayer(name); err != nil {
+				return nil, err
+			}
+			return u.layer.OpenFile(name, flag, perm)
+		}
+
+		dir := filepath.Dir(name)
+		isaDir, err := IsDir(u.base, dir)
+		if err != nil && !os.IsNotExist(err) {
+			return nil, err
+		}
+		if isaDir {
+			if err = u.layer.MkdirAll(dir, 0777); err != nil {
+				return nil, err
+			}
+			return u.layer.OpenFile(name, flag, perm)
+		}
+
+		isaDir, err = IsDir(u.layer, dir)
+		if err != nil {
+			return nil, err
+		}
+		if isaDir {
+			return u.layer.OpenFile(name, flag, perm)
+		}
+
+		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist?
+	}
+	if b {
+		return u.base.OpenFile(name, flag, perm)
+	}
+	return u.layer.OpenFile(name, flag, perm)
+}
+
+// This function handles the 9 different possibilities caused
+// by the union which are the intersection of the following...
+//  layer: doesn't exist, exists as a file, and exists as a directory
+//  base:  doesn't exist, exists as a file, and exists as a directory
+func (u *CopyOnWriteFs) Open(name string) (File, error) {
+	// Since the overlay overrides the base we check that first
+	b, err := u.isBaseFile(name)
+	if err != nil {
+		return nil, err
+	}
+
+	// If overlay doesn't exist, return the base (base state irrelevant)
+	if b {
+		return u.base.Open(name)
+	}
+
+	// If overlay is a file, return it (base state irrelevant)
+	dir, err := IsDir(u.layer, name)
+	if err != nil {
+		return nil, err
+	}
+	if !dir {
+		return u.layer.Open(name)
+	}
+
+	// Overlay is a directory, base state now matters.
+	// Base state has 3 states to check but 2 outcomes:
+	// A. It's a file or non-readable in the base (return just the overlay)
+	// B. It's an accessible directory in the base (return a UnionFile)
+
+	// If base is file or nonreadable, return overlay
+	dir, err = IsDir(u.base, name)
+	if !dir || err != nil {
+		return u.layer.Open(name)
+	}
+
+	// Both base & layer are directories
+	// Return union file (if opens are without error)
+	bfile, bErr := u.base.Open(name)
+	lfile, lErr := u.layer.Open(name)
+
+	// If either have errors at this point something is very wrong. Return nil and the errors
+	if bErr != nil || lErr != nil {
+		return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
+	}
+
+	return &UnionFile{base: bfile, layer: lfile}, nil
+}
+
+func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
+	dir, err := IsDir(u.base, name)
+	if err != nil {
+		return u.layer.MkdirAll(name, perm)
+	}
+	if dir {
+		return syscall.EEXIST
+	}
+	return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Name() string {
+	return "CopyOnWriteFs"
+}
+
+func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
+	dir, err := IsDir(u.base, name)
+	if err != nil {
+		return u.layer.MkdirAll(name, perm)
+	}
+	if dir {
+		return syscall.EEXIST
+	}
+	return u.layer.MkdirAll(name, perm)
+}
+
+func (u *CopyOnWriteFs) Create(name string) (File, error) {
+	return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
+}
diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go
new file mode 100644
index 0000000000000000000000000000000000000000..c42193688ceb626d75d6a4f97b4cfa6d30ec2e00
--- /dev/null
+++ b/vendor/github.com/spf13/afero/httpFs.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"errors"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+type httpDir struct {
+	basePath string
+	fs       HttpFs
+}
+
+func (d httpDir) Open(name string) (http.File, error) {
+	if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 ||
+		strings.Contains(name, "\x00") {
+		return nil, errors.New("http: invalid character in file path")
+	}
+	dir := string(d.basePath)
+	if dir == "" {
+		dir = "."
+	}
+
+	f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+type HttpFs struct {
+	source Fs
+}
+
+func NewHttpFs(source Fs) *HttpFs {
+	return &HttpFs{source: source}
+}
+
+func (h HttpFs) Dir(s string) *httpDir {
+	return &httpDir{basePath: s, fs: h}
+}
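+
+// For example, to serve files from any afero Fs over HTTP (a sketch; the
+// address and directory are illustrative):
+//
+//	httpFs := NewHttpFs(NewMemMapFs())
+//	fileserver := http.FileServer(httpFs.Dir("/static"))
+//	http.ListenAndServe(":8080", fileserver)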
+
+func (h HttpFs) Name() string { return "HttpFs" }
+
+func (h HttpFs) Create(name string) (File, error) {
+	return h.source.Create(name)
+}
+
+func (h HttpFs) Chmod(name string, mode os.FileMode) error {
+	return h.source.Chmod(name, mode)
+}
+
+func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+	return h.source.Chtimes(name, atime, mtime)
+}
+
+func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
+	return h.source.Mkdir(name, perm)
+}
+
+func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
+	return h.source.MkdirAll(path, perm)
+}
+
+func (h HttpFs) Open(name string) (http.File, error) {
+	f, err := h.source.Open(name)
+	if err == nil {
+		if httpfile, ok := f.(http.File); ok {
+			return httpfile, nil
+		}
+	}
+	return nil, err
+}
+
+func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	return h.source.OpenFile(name, flag, perm)
+}
+
+func (h HttpFs) Remove(name string) error {
+	return h.source.Remove(name)
+}
+
+func (h HttpFs) RemoveAll(path string) error {
+	return h.source.RemoveAll(path)
+}
+
+func (h HttpFs) Rename(oldname, newname string) error {
+	return h.source.Rename(oldname, newname)
+}
+
+func (h HttpFs) Stat(name string) (os.FileInfo, error) {
+	return h.source.Stat(name)
+}
diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go
new file mode 100644
index 0000000000000000000000000000000000000000..5c3a3d8fffc98b973a7a89ee5728de12a4e91ed9
--- /dev/null
+++ b/vendor/github.com/spf13/afero/ioutil.go
@@ -0,0 +1,230 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"bytes"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// byName implements sort.Interface.
+type byName []os.FileInfo
+
+func (f byName) Len() int           { return len(f) }
+func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
+func (f byName) Swap(i, j int)      { f[i], f[j] = f[j], f[i] }
+
+// ReadDir reads the directory named by dirname and returns
+// a list of sorted directory entries.
+func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
+	return ReadDir(a.Fs, dirname)
+}
+
+func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
+	f, err := fs.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	list, err := f.Readdir(-1)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+	sort.Sort(byName(list))
+	return list, nil
+}
+
+// ReadFile reads the file named by filename and returns the contents.
+// A successful call returns err == nil, not err == EOF. Because ReadFile
+// reads the whole file, it does not treat an EOF from Read as an error
+// to be reported.
+func (a Afero) ReadFile(filename string) ([]byte, error) {
+	return ReadFile(a.Fs, filename)
+}
+
+func ReadFile(fs Fs, filename string) ([]byte, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	// It's a good but not certain bet that FileInfo will tell us exactly how much to
+	// read, so let's try it but be prepared for the answer to be wrong.
+	var n int64
+
+	if fi, err := f.Stat(); err == nil {
+		// Don't preallocate a huge buffer, just in case.
+		if size := fi.Size(); size < 1e9 {
+			n = size
+		}
+	}
+	// As initial capacity for readAll, use n + a little extra in case Size is zero,
+	// and to avoid another allocation after Read has filled the buffer.  The readAll
+	// call will read into its allocated internal buffer cheaply.  If the size was
+	// wrong, we'll either waste some space off the end or reallocate as needed, but
+	// in the overwhelmingly common case we'll get it just right.
+	return readAll(f, n+bytes.MinRead)
+}
+
+// readAll reads from r until an error or EOF and returns the data it read
+// from the internal buffer allocated with a specified capacity.
+func readAll(r io.Reader, capacity int64) (b []byte, err error) {
+	buf := bytes.NewBuffer(make([]byte, 0, capacity))
+	// If the buffer overflows, we will get bytes.ErrTooLarge.
+	// Return that as an error. Any other panic remains.
+	defer func() {
+		e := recover()
+		if e == nil {
+			return
+		}
+		if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
+			err = panicErr
+		} else {
+			panic(e)
+		}
+	}()
+	_, err = buf.ReadFrom(r)
+	return buf.Bytes(), err
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+func ReadAll(r io.Reader) ([]byte, error) {
+	return readAll(r, bytes.MinRead)
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
+	return WriteFile(a.Fs, filename, data, perm)
+}
+
+func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
+	f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		err = io.ErrShortWrite
+	}
+	if err1 := f.Close(); err == nil {
+		err = err1
+	}
+	return err
+}
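+
+// For example, round-tripping a file through an in-memory Fs (a sketch;
+// the file name is illustrative):
+//
+//	fs := NewMemMapFs()
+//	err := WriteFile(fs, "hello.txt", []byte("hi"), 0644)
+//	data, err := ReadFile(fs, "hello.txt")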
+
+// Random number state.
+// We generate random temporary file names so that there's a good
+// chance the file doesn't exist yet - keeps the number of tries in
+// TempFile to a minimum.
+var rand uint32
+var randmu sync.Mutex
+
+func reseed() uint32 {
+	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
+}
+
+func nextSuffix() string {
+	randmu.Lock()
+	r := rand
+	if r == 0 {
+		r = reseed()
+	}
+	r = r*1664525 + 1013904223 // constants from Numerical Recipes
+	rand = r
+	randmu.Unlock()
+	return strconv.Itoa(int(1e9 + r%1e9))[1:]
+}
+
+// TempFile creates a new temporary file in the directory dir
+// with a name beginning with prefix, opens the file for reading
+// and writing, and returns the resulting *File.
+// If dir is the empty string, TempFile uses the default directory
+// for temporary files (see os.TempDir).
+// Multiple programs calling TempFile simultaneously
+// will not choose the same file.  The caller can use f.Name()
+// to find the pathname of the file.  It is the caller's responsibility
+// to remove the file when no longer needed.
+func (a Afero) TempFile(dir, prefix string) (f File, err error) {
+	return TempFile(a.Fs, dir, prefix)
+}
+
+func TempFile(fs Fs, dir, prefix string) (f File, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		name := filepath.Join(dir, prefix+nextSuffix())
+		f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		break
+	}
+	return
+}
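+
+// For example (a sketch; the prefix is illustrative):
+//
+//	fs := NewMemMapFs()
+//	f, err := TempFile(fs, "", "upload-")
+//	if err == nil {
+//		defer fs.Remove(f.Name())
+//	}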
+
+// TempDir creates a new temporary directory in the directory dir
+// with a name beginning with prefix and returns the path of the
+// new directory.  If dir is the empty string, TempDir uses the
+// default directory for temporary files (see os.TempDir).
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory.  It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+	return TempDir(a.Fs, dir, prefix)
+}
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		try := filepath.Join(dir, prefix+nextSuffix())
+		err = fs.Mkdir(try, 0700)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		if err == nil {
+			name = try
+		}
+		break
+	}
+	return
+}
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 0000000000000000000000000000000000000000..08b3b7e0146bbfcda9b902776c6750021c3832ea
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+	if !hasMeta(pattern) {
+		// afero does not support Lstat directly.
+		if _, err = lstatIfOs(fs, pattern); err != nil {
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := filepath.Split(pattern)
+	switch dir {
+	case "":
+		dir = "."
+	case string(filepath.Separator):
+	// nothing
+	default:
+		dir = dir[0 : len(dir)-1] // chop off trailing separator
+	}
+
+	if !hasMeta(dir) {
+		return glob(fs, dir, file, nil)
+	}
+
+	var m []string
+	m, err = Glob(fs, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fs, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
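+
+// For example, matching log files on any Fs (a sketch; the pattern is
+// illustrative):
+//
+//	fs := NewOsFs()
+//	matches, err := Glob(fs, "logs/*.log")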
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+	m = matches
+	fi, err := fs.Stat(dir)
+	if err != nil {
+		return
+	}
+	if !fi.IsDir() {
+		return
+	}
+	d, err := fs.Open(dir)
+	if err != nil {
+		return
+	}
+	defer d.Close()
+
+	names, _ := d.Readdirnames(-1)
+	sort.Strings(names)
+
+	for _, n := range names {
+		matched, err := filepath.Match(pattern, n)
+		if err != nil {
+			return m, err
+		}
+		if matched {
+			m = append(m, filepath.Join(dir, n))
+		}
+	}
+	return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+	// TODO(niemeyer): Should other magic characters be added here?
+	return strings.IndexAny(path, "*?[") >= 0
+}
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 0000000000000000000000000000000000000000..e104013f45712024294836f2e8e90a333303cdec
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+	Len() int
+	Names() []string
+	Files() []*FileData
+	Add(*FileData)
+	Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+	if d.memDir == nil {
+		d.dir = true
+		d.memDir = &DirMap{}
+	}
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 0000000000000000000000000000000000000000..03a57ee5b52e8d28663c2eb4dddc44841b604df6
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import "sort"
+
+type DirMap map[string]*FileData
+
+func (m DirMap) Len() int           { return len(m) }
+func (m DirMap) Add(f *FileData)    { m[f.name] = f }
+func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
+func (m DirMap) Files() (files []*FileData) {
+	for _, f := range m {
+		files = append(files, f)
+	}
+	sort.Sort(filesSorter(files))
+	return files
+}
+
+// implement sort.Interface for []*FileData
+type filesSorter []*FileData
+
+func (s filesSorter) Len() int           { return len(s) }
+func (s filesSorter) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
+
+func (m DirMap) Names() (names []string) {
+	for x := range m {
+		names = append(names, x)
+	}
+	return names
+}
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
new file mode 100644
index 0000000000000000000000000000000000000000..5401a3b7c02add2fa56e6c374e3627b944bf55ea
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -0,0 +1,311 @@
+// Copyright © 2015 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+const FilePathSeparator = string(filepath.Separator)
+
+type File struct {
+	// atomic requires 64-bit alignment for struct field access
+	at           int64
+	readDirCount int64
+	closed       bool
+	readOnly     bool
+	fileData     *FileData
+}
+
+func NewFileHandle(data *FileData) *File {
+	return &File{fileData: data}
+}
+
+func NewReadOnlyFileHandle(data *FileData) *File {
+	return &File{fileData: data, readOnly: true}
+}
+
+func (f File) Data() *FileData {
+	return f.fileData
+}
+
+type FileData struct {
+	sync.Mutex
+	name    string
+	data    []byte
+	memDir  Dir
+	dir     bool
+	mode    os.FileMode
+	modtime time.Time
+}
+
+func (d *FileData) Name() string {
+	d.Lock()
+	defer d.Unlock()
+	return d.name
+}
+
+func CreateFile(name string) *FileData {
+	return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
+}
+
+func CreateDir(name string) *FileData {
+	return &FileData{name: name, memDir: &DirMap{}, dir: true}
+}
+
+func ChangeFileName(f *FileData, newname string) {
+	f.Lock()
+	f.name = newname
+	f.Unlock()
+}
+
+func SetMode(f *FileData, mode os.FileMode) {
+	f.Lock()
+	f.mode = mode
+	f.Unlock()
+}
+
+func SetModTime(f *FileData, mtime time.Time) {
+	f.Lock()
+	setModTime(f, mtime)
+	f.Unlock()
+}
+
+func setModTime(f *FileData, mtime time.Time) {
+	f.modtime = mtime
+}
+
+func GetFileInfo(f *FileData) *FileInfo {
+	return &FileInfo{f}
+}
+
+func (f *File) Open() error {
+	atomic.StoreInt64(&f.at, 0)
+	atomic.StoreInt64(&f.readDirCount, 0)
+	f.fileData.Lock()
+	f.closed = false
+	f.fileData.Unlock()
+	return nil
+}
+
+func (f *File) Close() error {
+	f.fileData.Lock()
+	f.closed = true
+	if !f.readOnly {
+		setModTime(f.fileData, time.Now())
+	}
+	f.fileData.Unlock()
+	return nil
+}
+
+func (f *File) Name() string {
+	return f.fileData.Name()
+}
+
+func (f *File) Stat() (os.FileInfo, error) {
+	return &FileInfo{f.fileData}, nil
+}
+
+func (f *File) Sync() error {
+	return nil
+}
+
+func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
+	var outLength int64
+
+	f.fileData.Lock()
+	files := f.fileData.memDir.Files()[f.readDirCount:]
+	if count > 0 {
+		if len(files) < count {
+			outLength = int64(len(files))
+		} else {
+			outLength = int64(count)
+		}
+		if len(files) == 0 {
+			err = io.EOF
+		}
+	} else {
+		outLength = int64(len(files))
+	}
+	f.readDirCount += outLength
+	f.fileData.Unlock()
+
+	res = make([]os.FileInfo, outLength)
+	for i := range res {
+		res[i] = &FileInfo{files[i]}
+	}
+
+	return res, err
+}
+
+func (f *File) Readdirnames(n int) (names []string, err error) {
+	fi, err := f.Readdir(n)
+	names = make([]string, len(fi))
+	for i, f := range fi {
+		_, names[i] = filepath.Split(f.Name())
+	}
+	return names, err
+}
+
+func (f *File) Read(b []byte) (n int, err error) {
+	f.fileData.Lock()
+	defer f.fileData.Unlock()
+	if f.closed {
+		return 0, ErrFileClosed
+	}
+	if len(b) > 0 && int(f.at) == len(f.fileData.data) {
+		return 0, io.EOF
+	}
+	if len(f.fileData.data)-int(f.at) >= len(b) {
+		n = len(b)
+	} else {
+		n = len(f.fileData.data) - int(f.at)
+	}
+	copy(b, f.fileData.data[f.at:f.at+int64(n)])
+	atomic.AddInt64(&f.at, int64(n))
+	return
+}
+
+func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
+	atomic.StoreInt64(&f.at, off)
+	return f.Read(b)
+}
+
+func (f *File) Truncate(size int64) error {
+	if f.closed {
+		return ErrFileClosed
+	}
+	if f.readOnly {
+		return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+	}
+	if size < 0 {
+		return ErrOutOfRange
+	}
+	if size > int64(len(f.fileData.data)) {
+		diff := size - int64(len(f.fileData.data))
+		f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{00}, int(diff))...)
+	} else {
+		f.fileData.data = f.fileData.data[0:size]
+	}
+	setModTime(f.fileData, time.Now())
+	return nil
+}
+
+func (f *File) Seek(offset int64, whence int) (int64, error) {
+	if f.closed {
+		return 0, ErrFileClosed
+	}
+	switch whence {
+	case 0:
+		atomic.StoreInt64(&f.at, offset)
+	case 1:
+		atomic.AddInt64(&f.at, int64(offset))
+	case 2:
+		atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
+	}
+	return f.at, nil
+}
+
+func (f *File) Write(b []byte) (n int, err error) {
+	if f.readOnly {
+		return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")}
+	}
+	n = len(b)
+	cur := atomic.LoadInt64(&f.at)
+	f.fileData.Lock()
+	defer f.fileData.Unlock()
+	diff := cur - int64(len(f.fileData.data))
+	var tail []byte
+	if n+int(cur) < len(f.fileData.data) {
+		tail = f.fileData.data[n+int(cur):]
+	}
+	if diff > 0 {
+		f.fileData.data = append(bytes.Repeat([]byte{00}, int(diff)), b...)
+		f.fileData.data = append(f.fileData.data, tail...)
+	} else {
+		f.fileData.data = append(f.fileData.data[:cur], b...)
+		f.fileData.data = append(f.fileData.data, tail...)
+	}
+	setModTime(f.fileData, time.Now())
+
+	atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
+	return
+}
+
+func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
+	atomic.StoreInt64(&f.at, off)
+	return f.Write(b)
+}
+
+func (f *File) WriteString(s string) (ret int, err error) {
+	return f.Write([]byte(s))
+}
+
+func (f *File) Info() *FileInfo {
+	return &FileInfo{f.fileData}
+}
+
+type FileInfo struct {
+	*FileData
+}
+
+// Implements os.FileInfo
+func (s *FileInfo) Name() string {
+	s.Lock()
+	_, name := filepath.Split(s.name)
+	s.Unlock()
+	return name
+}
+func (s *FileInfo) Mode() os.FileMode {
+	s.Lock()
+	defer s.Unlock()
+	return s.mode
+}
+func (s *FileInfo) ModTime() time.Time {
+	s.Lock()
+	defer s.Unlock()
+	return s.modtime
+}
+func (s *FileInfo) IsDir() bool {
+	s.Lock()
+	defer s.Unlock()
+	return s.dir
+}
+func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Size() int64 {
+	if s.IsDir() {
+		return int64(42)
+	}
+	s.Lock()
+	defer s.Unlock()
+	return int64(len(s.data))
+}
+
+var (
+	ErrFileClosed        = errors.New("File is closed")
+	ErrOutOfRange        = errors.New("Out of range")
+	ErrTooLarge          = errors.New("Too large")
+	ErrFileNotFound      = os.ErrNotExist
+	ErrFileExists        = os.ErrExist
+	ErrDestinationExists = os.ErrExist
+)
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
new file mode 100644
index 0000000000000000000000000000000000000000..09498e70fbaa6e350ba699a1895d602cd957a496
--- /dev/null
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -0,0 +1,365 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/spf13/afero/mem"
+)
+
+type MemMapFs struct {
+	mu   sync.RWMutex
+	data map[string]*mem.FileData
+	init sync.Once
+}
+
+func NewMemMapFs() Fs {
+	return &MemMapFs{}
+}
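+
+// For example (a sketch; names are illustrative):
+//
+//	fs := NewMemMapFs()
+//	fs.MkdirAll("/srv/app", 0755)
+//	f, _ := fs.Create("/srv/app/state.json")
+//	f.WriteString("{}")
+//	f.Close()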
+
+func (m *MemMapFs) getData() map[string]*mem.FileData {
+	m.init.Do(func() {
+		m.data = make(map[string]*mem.FileData)
+		// Root should always exist, right?
+		// TODO: what about windows?
+		m.data[FilePathSeparator] = mem.CreateDir(FilePathSeparator)
+	})
+	return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+	name = normalizePath(name)
+	m.mu.Lock()
+	file := mem.CreateFile(name)
+	m.getData()[name] = file
+	m.registerWithParent(file)
+	m.mu.Unlock()
+	return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+	f, err := m.lockfreeOpen(fileName)
+	if err != nil {
+		return err
+	}
+	parent := m.findParent(f)
+	if parent == nil {
+		log.Panic("parent of ", f.Name(), " is nil")
+	}
+
+	parent.Lock()
+	mem.RemoveFromMemDir(parent, f)
+	parent.Unlock()
+	return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+	pdir, _ := filepath.Split(f.Name())
+	pdir = filepath.Clean(pdir)
+	pfile, err := m.lockfreeOpen(pdir)
+	if err != nil {
+		return nil
+	}
+	return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData) {
+	if f == nil {
+		return
+	}
+	parent := m.findParent(f)
+	if parent == nil {
+		pdir := filepath.Dir(filepath.Clean(f.Name()))
+		err := m.lockfreeMkdir(pdir, 0777)
+		if err != nil {
+			//log.Println("Mkdir error:", err)
+			return
+		}
+		parent, err = m.lockfreeOpen(pdir)
+		if err != nil {
+			//log.Println("Open after Mkdir error:", err)
+			return
+		}
+	}
+
+	parent.Lock()
+	mem.InitializeDir(parent)
+	mem.AddToMemDir(parent, f)
+	parent.Unlock()
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+	name = normalizePath(name)
+	x, ok := m.getData()[name]
+	if ok {
+		// Only return ErrFileExists if it's a file, not a directory.
+		i := mem.FileInfo{FileData: x}
+		if !i.IsDir() {
+			return ErrFileExists
+		}
+	} else {
+		item := mem.CreateDir(name)
+		m.getData()[name] = item
+		m.registerWithParent(item)
+	}
+	return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	_, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if ok {
+		return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+	}
+
+	m.mu.Lock()
+	item := mem.CreateDir(name)
+	m.getData()[name] = item
+	m.registerWithParent(item)
+	m.mu.Unlock()
+
+	m.Chmod(name, perm|os.ModeDir)
+
+	return nil
+}
+
+func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
+	err := m.Mkdir(path, perm)
+	if err != nil {
+		if err.(*os.PathError).Err == ErrFileExists {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// Handle some relative paths
+func normalizePath(path string) string {
+	path = filepath.Clean(path)
+
+	switch path {
+	case ".":
+		return FilePathSeparator
+	case "..":
+		return FilePathSeparator
+	default:
+		return path
+	}
+}
+
+func (m *MemMapFs) Open(name string) (File, error) {
+	f, err := m.open(name)
+	if f != nil {
+		return mem.NewReadOnlyFileHandle(f), err
+	}
+	return nil, err
+}
+
+func (m *MemMapFs) openWrite(name string) (File, error) {
+	f, err := m.open(name)
+	if f != nil {
+		return mem.NewFileHandle(f), err
+	}
+	return nil, err
+}
+
+func (m *MemMapFs) open(name string) (*mem.FileData, error) {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	f, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if !ok {
+		return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
+	}
+	return f, nil
+}
+
+func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
+	name = normalizePath(name)
+	f, ok := m.getData()[name]
+	if ok {
+		return f, nil
+	} else {
+		return nil, ErrFileNotFound
+	}
+}
+
+func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	chmod := false
+	file, err := m.openWrite(name)
+	if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
+		file, err = m.Create(name)
+		chmod = true
+	}
+	if err != nil {
+		return nil, err
+	}
+	if flag == os.O_RDONLY {
+		file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
+	}
+	if flag&os.O_APPEND > 0 {
+		_, err = file.Seek(0, os.SEEK_END)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
+		err = file.Truncate(0)
+		if err != nil {
+			file.Close()
+			return nil, err
+		}
+	}
+	if chmod {
+		m.Chmod(name, perm)
+	}
+	return file, nil
+}
+
+func (m *MemMapFs) Remove(name string) error {
+	name = normalizePath(name)
+
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if _, ok := m.getData()[name]; ok {
+		err := m.unRegisterWithParent(name)
+		if err != nil {
+			return &os.PathError{Op: "remove", Path: name, Err: err}
+		}
+		delete(m.getData(), name)
+	} else {
+		return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
+	}
+	return nil
+}
+
+func (m *MemMapFs) RemoveAll(path string) error {
+	path = normalizePath(path)
+	m.mu.Lock()
+	m.unRegisterWithParent(path)
+	m.mu.Unlock()
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+
+	for p := range m.getData() {
+		if strings.HasPrefix(p, path) {
+			m.mu.RUnlock()
+			m.mu.Lock()
+			delete(m.getData(), p)
+			m.mu.Unlock()
+			m.mu.RLock()
+		}
+	}
+	return nil
+}
+
+func (m *MemMapFs) Rename(oldname, newname string) error {
+	oldname = normalizePath(oldname)
+	newname = normalizePath(newname)
+
+	if oldname == newname {
+		return nil
+	}
+
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	if _, ok := m.getData()[oldname]; ok {
+		m.mu.RUnlock()
+		m.mu.Lock()
+		m.unRegisterWithParent(oldname)
+		fileData := m.getData()[oldname]
+		delete(m.getData(), oldname)
+		mem.ChangeFileName(fileData, newname)
+		m.getData()[newname] = fileData
+		m.registerWithParent(fileData)
+		m.mu.Unlock()
+		m.mu.RLock()
+	} else {
+		return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
+	}
+	return nil
+}
+
+func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
+	f, err := m.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	fi := mem.GetFileInfo(f.(*mem.File).Data())
+	return fi, nil
+}
+
+func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	f, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if !ok {
+		return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
+	}
+
+	m.mu.Lock()
+	mem.SetMode(f, mode)
+	m.mu.Unlock()
+
+	return nil
+}
+
+func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	f, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if !ok {
+		return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
+	}
+
+	m.mu.Lock()
+	mem.SetModTime(f, mtime)
+	m.mu.Unlock()
+
+	return nil
+}
+
+func (m *MemMapFs) List() {
+	for _, x := range m.data {
+		y := mem.FileInfo{FileData: x}
+		fmt.Println(x.Name(), y.Size())
+	}
+}
+
+// func debugMemMapList(fs Fs) {
+// 	if x, ok := fs.(*MemMapFs); ok {
+// 		x.List()
+// 	}
+// }
diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go
new file mode 100644
index 0000000000000000000000000000000000000000..6b8bce1c502c95f4434b727acc5950008f0329da
--- /dev/null
+++ b/vendor/github.com/spf13/afero/os.go
@@ -0,0 +1,94 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2013 tsuru authors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+	"time"
+)
+
+// OsFs is a Fs implementation that uses functions provided by the os package.
+//
+// For details on any method, check the documentation of the os package
+// (http://golang.org/pkg/os/).
+type OsFs struct{}
+
+func NewOsFs() Fs {
+	return &OsFs{}
+}
+
+func (OsFs) Name() string { return "OsFs" }
+
+func (OsFs) Create(name string) (File, error) {
+	f, e := os.Create(name)
+	if f == nil {
+		// while this looks strange, we need to return a bare, untyped nil, not
+		// a nil *os.File wrapped in the File interface, or the returned value
+		// won't compare equal to nil
+		return nil, e
+	}
+	return f, e
+}
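+
+// To illustrate the typed-nil pitfall the comment above guards against
+// (a minimal sketch, not part of this file's API):
+//
+//	var p *os.File        // nil pointer
+//	var f File = p        // interface now holds (*os.File)(nil)
+//	fmt.Println(f == nil) // false: the interface's type descriptor is non-nil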
+
+func (OsFs) Mkdir(name string, perm os.FileMode) error {
+	return os.Mkdir(name, perm)
+}
+
+func (OsFs) MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
+
+func (OsFs) Open(name string) (File, error) {
+	f, e := os.Open(name)
+	if f == nil {
+		// while this looks strange, we need to return a bare, untyped nil, not
+		// a nil *os.File wrapped in the File interface, or the returned value
+		// won't compare equal to nil
+		return nil, e
+	}
+	return f, e
+}
+
+func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	f, e := os.OpenFile(name, flag, perm)
+	if f == nil {
+		// while this looks strange, we need to return a bare, untyped nil, not
+		// a nil *os.File wrapped in the File interface, or the returned value
+		// won't compare equal to nil
+		return nil, e
+	}
+	return f, e
+}
+
+func (OsFs) Remove(name string) error {
+	return os.Remove(name)
+}
+
+func (OsFs) RemoveAll(path string) error {
+	return os.RemoveAll(path)
+}
+
+func (OsFs) Rename(oldname, newname string) error {
+	return os.Rename(oldname, newname)
+}
+
+func (OsFs) Stat(name string) (os.FileInfo, error) {
+	return os.Stat(name)
+}
+
+func (OsFs) Chmod(name string, mode os.FileMode) error {
+	return os.Chmod(name, mode)
+}
+
+func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
+	return os.Chtimes(name, atime, mtime)
+}
diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go
new file mode 100644
index 0000000000000000000000000000000000000000..1d90e46dd0fc4a34283712b8ae7398380d1bd589
--- /dev/null
+++ b/vendor/github.com/spf13/afero/path.go
@@ -0,0 +1,108 @@
+// Copyright ©2015 The Go Authors
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+	"path/filepath"
+	"sort"
+)
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+// adapted from https://golang.org/src/path/filepath/path.go
+func readDirNames(fs Fs, dirname string) ([]string, error) {
+	f, err := fs.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	names, err := f.Readdirnames(-1)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+// walk recursively descends path, calling walkFn
+// adapted from https://golang.org/src/path/filepath/path.go
+func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+	err := walkFn(path, info, nil)
+	if err != nil {
+		if info.IsDir() && err == filepath.SkipDir {
+			return nil
+		}
+		return err
+	}
+
+	if !info.IsDir() {
+		return nil
+	}
+
+	names, err := readDirNames(fs, path)
+	if err != nil {
+		return walkFn(path, info, err)
+	}
+
+	for _, name := range names {
+		filename := filepath.Join(path, name)
+		fileInfo, err := lstatIfOs(fs, filename)
+		if err != nil {
+			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+				return err
+			}
+		} else {
+			err = walk(fs, filename, fileInfo, walkFn)
+			if err != nil {
+				if !fileInfo.IsDir() || err != filepath.SkipDir {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// if the filesystem is OsFs use Lstat, else use fs.Stat
+func lstatIfOs(fs Fs, path string) (info os.FileInfo, err error) {
+	_, ok := fs.(*OsFs)
+	if ok {
+		info, err = os.Lstat(path)
+	} else {
+		info, err = fs.Stat(path)
+	}
+	return
+}
+
+// Walk walks the file tree rooted at root, calling walkFn for each file or
+// directory in the tree, including root. All errors that arise visiting files
+// and directories are filtered by walkFn. The files are walked in lexical
+// order, which makes the output deterministic but means that for very
+// large directories Walk can be inefficient.
+// Walk does not follow symbolic links.
+func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
+	return Walk(a.Fs, root, walkFn)
+}
+
+func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
+	info, err := lstatIfOs(fs, root)
+	if err != nil {
+		return walkFn(root, nil, err)
+	}
+	return walk(fs, root, info, walkFn)
+}
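+
+// A minimal usage sketch (hypothetical caller code, assuming an in-memory fs):
+//
+//	fs := NewMemMapFs()
+//	err := Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
+//		fmt.Println(path)
+//		return err
+//	})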
diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go
new file mode 100644
index 0000000000000000000000000000000000000000..f1fa55bcf4e8c77e5093741e35c74a621e4de884
--- /dev/null
+++ b/vendor/github.com/spf13/afero/readonlyfs.go
@@ -0,0 +1,70 @@
+package afero
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+type ReadOnlyFs struct {
+	source Fs
+}
+
+func NewReadOnlyFs(source Fs) Fs {
+	return &ReadOnlyFs{source: source}
+}
+
+func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
+	return ReadDir(r.source, name)
+}
+
+func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Name() string {
+	return "ReadOnlyFilter"
+}
+
+func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
+	return r.source.Stat(name)
+}
+
+func (r *ReadOnlyFs) Rename(o, n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) RemoveAll(p string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Remove(n string) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
+		return nil, syscall.EPERM
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *ReadOnlyFs) Open(n string) (File, error) {
+	return r.source.Open(n)
+}
+
+func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
+	return syscall.EPERM
+}
+
+func (r *ReadOnlyFs) Create(n string) (File, error) {
+	return nil, syscall.EPERM
+}
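+
+// A minimal usage sketch (hypothetical caller code): mutating calls are
+// rejected with syscall.EPERM while reads pass through to the wrapped Fs.
+//
+//	ro := NewReadOnlyFs(NewOsFs())
+//	_, err := ro.Create("/tmp/x")    // err == syscall.EPERM
+//	f, err2 := ro.Open("/etc/hosts") // delegated to the underlying Fs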
diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go
new file mode 100644
index 0000000000000000000000000000000000000000..9d92dbc051ff5591e312ffd09914f914590f8687
--- /dev/null
+++ b/vendor/github.com/spf13/afero/regexpfs.go
@@ -0,0 +1,214 @@
+package afero
+
+import (
+	"os"
+	"regexp"
+	"syscall"
+	"time"
+)
+
+// The RegexpFs filters files (not directories) by regular expression. Only
+// files matching the given regexp will be allowed; all others get an ENOENT
+// ("no such file or directory") error.
+type RegexpFs struct {
+	re     *regexp.Regexp
+	source Fs
+}
+
+func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
+	return &RegexpFs{source: source, re: re}
+}
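+
+// A minimal usage sketch (hypothetical caller code): only names matching the
+// pattern are visible; everything else reports ENOENT.
+//
+//	fs := NewRegexpFs(NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+//	_, err := fs.Create("notes.json") // err == syscall.ENOENT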
+
+type RegexpFile struct {
+	f  File
+	re *regexp.Regexp
+}
+
+func (r *RegexpFs) matchesName(name string) error {
+	if r.re == nil {
+		return nil
+	}
+	if r.re.MatchString(name) {
+		return nil
+	}
+	return syscall.ENOENT
+}
+
+func (r *RegexpFs) dirOrMatches(name string) error {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	return r.matchesName(name)
+}
+
+func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chtimes(name, a, m)
+}
+
+func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Chmod(name, mode)
+}
+
+func (r *RegexpFs) Name() string {
+	return "RegexpFs"
+}
+
+func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.Stat(name)
+}
+
+func (r *RegexpFs) Rename(oldname, newname string) error {
+	dir, err := IsDir(r.source, oldname)
+	if err != nil {
+		return err
+	}
+	if dir {
+		return nil
+	}
+	if err := r.matchesName(oldname); err != nil {
+		return err
+	}
+	if err := r.matchesName(newname); err != nil {
+		return err
+	}
+	return r.source.Rename(oldname, newname)
+}
+
+func (r *RegexpFs) RemoveAll(p string) error {
+	dir, err := IsDir(r.source, p)
+	if err != nil {
+		return err
+	}
+	if !dir {
+		if err := r.matchesName(p); err != nil {
+			return err
+		}
+	}
+	return r.source.RemoveAll(p)
+}
+
+func (r *RegexpFs) Remove(name string) error {
+	if err := r.dirOrMatches(name); err != nil {
+		return err
+	}
+	return r.source.Remove(name)
+}
+
+func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
+	if err := r.dirOrMatches(name); err != nil {
+		return nil, err
+	}
+	return r.source.OpenFile(name, flag, perm)
+}
+
+func (r *RegexpFs) Open(name string) (File, error) {
+	dir, err := IsDir(r.source, name)
+	if err != nil {
+		return nil, err
+	}
+	if !dir {
+		if err := r.matchesName(name); err != nil {
+			return nil, err
+		}
+	}
+	f, err := r.source.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	return &RegexpFile{f: f, re: r.re}, nil
+}
+
+func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
+	return r.source.Mkdir(n, p)
+}
+
+func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
+	return r.source.MkdirAll(n, p)
+}
+
+func (r *RegexpFs) Create(name string) (File, error) {
+	if err := r.matchesName(name); err != nil {
+		return nil, err
+	}
+	return r.source.Create(name)
+}
+
+func (f *RegexpFile) Close() error {
+	return f.f.Close()
+}
+
+func (f *RegexpFile) Read(s []byte) (int, error) {
+	return f.f.Read(s)
+}
+
+func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
+	return f.f.ReadAt(s, o)
+}
+
+func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
+	return f.f.Seek(o, w)
+}
+
+func (f *RegexpFile) Write(s []byte) (int, error) {
+	return f.f.Write(s)
+}
+
+func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
+	return f.f.WriteAt(s, o)
+}
+
+func (f *RegexpFile) Name() string {
+	return f.f.Name()
+}
+
+func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
+	var rfi []os.FileInfo
+	rfi, err = f.f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, i := range rfi {
+		if i.IsDir() || f.re.MatchString(i.Name()) {
+			fi = append(fi, i)
+		}
+	}
+	return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+	fi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range fi {
+		n = append(n, s.Name())
+	}
+	return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+	return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+	return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+	return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+	return f.f.WriteString(s)
+}
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 0000000000000000000000000000000000000000..99f9e5db27ee6483c72f351d3996271927c82ceb
--- /dev/null
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,274 @@
+package afero
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// The UnionFile implements the afero.File interface and will be returned
+// when reading a directory present at least in the overlay, or when opening
+// a file for writing.
+//
+// The calls to
+// Readdir() and Readdirnames() merge the file os.FileInfo / names from the
+// base and the overlay - for files present in both layers, only those
+// from the overlay will be used.
+//
+// When opening files for writing (Create() / OpenFile() with the right flags)
+// the operations will be done in both layers, starting with the overlay. A
+// successful read in the overlay will move the cursor position in the base layer
+// by the number of bytes read.
+type UnionFile struct {
+	base  File
+	layer File
+	off   int
+	files []os.FileInfo
+}
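+
+// For example (an illustrative sketch; base and layer are normally wired up
+// by a union-style Fs rather than by hand):
+//
+//	names, _ := unionDir.Readdirnames(-1) // merged view, overlay entries win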
+
+func (f *UnionFile) Close() error {
+	// first close base, so we have a newer timestamp in the overlay. If we'd close
+	// the overlay first, we'd get a cacheStale the next time we access this file
+	// -> cache would be useless ;-)
+	if f.base != nil {
+		f.base.Close()
+	}
+	if f.layer != nil {
+		return f.layer.Close()
+	}
+	return BADFD
+}
+
+func (f *UnionFile) Read(s []byte) (int, error) {
+	if f.layer != nil {
+		n, err := f.layer.Read(s)
+		if (err == nil || err == io.EOF) && f.base != nil {
+			// advance the file position also in the base file, the next
+			// call may be a write at this position (or a seek with SEEK_CUR)
+			if _, seekErr := f.base.Seek(int64(n), os.SEEK_CUR); seekErr != nil {
+				// only overwrite err in case the seek fails: we need to
+				// report an eventual io.EOF to the caller
+				err = seekErr
+			}
+		}
+		return n, err
+	}
+	if f.base != nil {
+		return f.base.Read(s)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
+	if f.layer != nil {
+		n, err := f.layer.ReadAt(s, o)
+		if (err == nil || err == io.EOF) && f.base != nil {
+			_, err = f.base.Seek(o+int64(n), os.SEEK_SET)
+		}
+		return n, err
+	}
+	if f.base != nil {
+		return f.base.ReadAt(s, o)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
+	if f.layer != nil {
+		pos, err = f.layer.Seek(o, w)
+		if (err == nil || err == io.EOF) && f.base != nil {
+			_, err = f.base.Seek(o, w)
+		}
+		return pos, err
+	}
+	if f.base != nil {
+		return f.base.Seek(o, w)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) Write(s []byte) (n int, err error) {
+	if f.layer != nil {
+		n, err = f.layer.Write(s)
+		if err == nil && f.base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
+			_, err = f.base.Write(s)
+		}
+		return n, err
+	}
+	if f.base != nil {
+		return f.base.Write(s)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
+	if f.layer != nil {
+		n, err = f.layer.WriteAt(s, o)
+		if err == nil && f.base != nil {
+			_, err = f.base.WriteAt(s, o)
+		}
+		return n, err
+	}
+	if f.base != nil {
+		return f.base.WriteAt(s, o)
+	}
+	return 0, BADFD
+}
+
+func (f *UnionFile) Name() string {
+	if f.layer != nil {
+		return f.layer.Name()
+	}
+	return f.base.Name()
+}
+
+// Readdir will weave the two directories together and
+// return a single view of the overlayed directories
+func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
+	if f.off == 0 {
+		var files = make(map[string]os.FileInfo)
+		var rfi []os.FileInfo
+		if f.layer != nil {
+			rfi, err = f.layer.Readdir(-1)
+			if err != nil {
+				return nil, err
+			}
+			for _, fi := range rfi {
+				files[fi.Name()] = fi
+			}
+		}
+
+		if f.base != nil {
+			rfi, err = f.base.Readdir(-1)
+			if err != nil {
+				return nil, err
+			}
+			for _, fi := range rfi {
+				if _, exists := files[fi.Name()]; !exists {
+					files[fi.Name()] = fi
+				}
+			}
+		}
+		for _, fi := range files {
+			f.files = append(f.files, fi)
+		}
+	}
+	if c <= 0 {
+		return f.files[f.off:], nil
+	}
+	if f.off+c > len(f.files) {
+		c = len(f.files) - f.off
+	}
+	defer func() { f.off += c }()
+	return f.files[f.off : f.off+c], nil
+}
+
+func (f *UnionFile) Readdirnames(c int) ([]string, error) {
+	rfi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	var names []string
+	for _, fi := range rfi {
+		names = append(names, fi.Name())
+	}
+	return names, nil
+}
+
+func (f *UnionFile) Stat() (os.FileInfo, error) {
+	if f.layer != nil {
+		return f.layer.Stat()
+	}
+	if f.base != nil {
+		return f.base.Stat()
+	}
+	return nil, BADFD
+}
+
+func (f *UnionFile) Sync() (err error) {
+	if f.layer != nil {
+		err = f.layer.Sync()
+		if err == nil && f.base != nil {
+			err = f.base.Sync()
+		}
+		return err
+	}
+	if f.base != nil {
+		return f.base.Sync()
+	}
+	return BADFD
+}
+
+func (f *UnionFile) Truncate(s int64) (err error) {
+	if f.layer != nil {
+		err = f.layer.Truncate(s)
+		if err == nil && f.base != nil {
+			err = f.base.Truncate(s)
+		}
+		return err
+	}
+	if f.base != nil {
+		return f.base.Truncate(s)
+	}
+	return BADFD
+}
+
+func (f *UnionFile) WriteString(s string) (n int, err error) {
+	if f.layer != nil {
+		n, err = f.layer.WriteString(s)
+		if err == nil && f.base != nil {
+			_, err = f.base.WriteString(s)
+		}
+		return n, err
+	}
+	if f.base != nil {
+		return f.base.WriteString(s)
+	}
+	return 0, BADFD
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+	bfh, err := base.Open(name)
+	if err != nil {
+		return err
+	}
+	defer bfh.Close()
+
+	// First make sure the directory exists
+	exists, err := Exists(layer, filepath.Dir(name))
+	if err != nil {
+		return err
+	}
+	if !exists {
+		err = layer.MkdirAll(filepath.Dir(name), 0777) // FIXME?
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create the file on the overlay
+	lfh, err := layer.Create(name)
+	if err != nil {
+		return err
+	}
+	n, err := io.Copy(lfh, bfh)
+	if err != nil {
+		// If anything fails, clean up the file
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+
+	bfi, err := bfh.Stat()
+	if err != nil || bfi.Size() != n {
+		layer.Remove(name)
+		lfh.Close()
+		return syscall.EIO
+	}
+
+	err = lfh.Close()
+	if err != nil {
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+	return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..7463887fd8dd2ef2cf5e078f309265d5aa02163a
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,331 @@
+// Copyright ©2015 Steve Francia <spf@spf13.com>
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"golang.org/x/text/transform"
+	"golang.org/x/text/unicode/norm"
+)
+
+// FilePathSeparator is the filepath separator defined by filepath.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+	return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwx for user, group and other
+		if err != nil {
+			if err != os.ErrExist {
+				log.Panicln(err)
+			}
+		}
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
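+
+// A minimal usage sketch (hypothetical caller code):
+//
+//	err := WriteReader(NewMemMapFs(), "/data/out.txt", strings.NewReader("hi"))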
+
+// SafeWriteReader is the same as WriteReader, but it fails if the file or directory already exists.
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+	return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0777) // rwx for user, group and other
+		if err != nil {
+			return
+		}
+	}
+
+	exists, err := Exists(fs, path)
+	if err != nil {
+		return
+	}
+	if exists {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+	return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash;
+// if subPath is not empty, it will be created recursively with mode 777 (rwx rwx rwx).
+func GetTempDir(fs Fs, subPath string) string {
+	addSlash := func(p string) string {
+		if FilePathSeparator != p[len(p)-1:] {
+			p = p + FilePathSeparator
+		}
+		return p
+	}
+	dir := addSlash(os.TempDir())
+
+	if subPath != "" {
+		// preserve windows backslash :-(
+		if FilePathSeparator == "\\" {
+			subPath = strings.Replace(subPath, "\\", "____", -1)
+		}
+		dir = dir + UnicodeSanitize(subPath)
+		if FilePathSeparator == "\\" {
+			dir = strings.Replace(dir, "____", "\\", -1)
+		}
+
+		if exists, _ := Exists(fs, dir); exists {
+			return addSlash(dir)
+		}
+
+		err := fs.MkdirAll(dir, 0777)
+		if err != nil {
+			panic(err)
+		}
+		dir = addSlash(dir)
+	}
+	return dir
+}
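+
+// A minimal usage sketch (hypothetical caller code):
+//
+//	dir := GetTempDir(NewMemMapFs(), "myapp")
+//	// dir is os.TempDir() + "myapp" + trailing separator, created if missing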
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+	source := []rune(s)
+	target := make([]rune, 0, len(source))
+
+	for _, r := range source {
+		if unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsMark(r) ||
+			r == '.' ||
+			r == '/' ||
+			r == '\\' ||
+			r == '_' ||
+			r == '-' ||
+			r == '%' ||
+			r == ' ' ||
+			r == '#' {
+			target = append(target, r)
+		}
+	}
+
+	return string(target)
+}
+
+// NeuterAccents transforms characters with accents into their plain forms.
+func NeuterAccents(s string) string {
+	t := transform.Chain(norm.NFD, transform.RemoveFunc(isMn), norm.NFC)
+	result, _, _ := transform.String(t, s)
+
+	return result
+}
+
+func isMn(r rune) bool {
+	return unicode.Is(unicode.Mn, r) // Mn: nonspacing marks
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+	return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// FileContainsBytes checks if a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// FileContainsAnyBytes checks if a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
+
+	if r == nil || len(subslices) == 0 {
+		return false
+	}
+
+	largestSlice := 0
+
+	for _, sl := range subslices {
+		if len(sl) > largestSlice {
+			largestSlice = len(sl)
+		}
+	}
+
+	if largestSlice == 0 {
+		return false
+	}
+
+	bufflen := largestSlice * 4
+	halflen := bufflen / 2
+	buff := make([]byte, bufflen)
+	var err error
+	var n, i int
+
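+	// Read in half-buffer steps; from the third read on, shift the newest half
+	// down so a subslice that straddles two consecutive reads still lies
+	// contiguously in buff for bytes.Contains to find.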
+	for {
+		i++
+		if i == 1 {
+			n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
+		} else {
+			if i != 2 {
+				// shift left to catch overlapping matches
+				copy(buff[:], buff[halflen:])
+			}
+			n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
+		}
+
+		if n > 0 {
+			for _, sl := range subslices {
+				if bytes.Contains(buff, sl) {
+					return true
+				}
+			}
+		}
+
+		if err != nil {
+			break
+		}
+	}
+	return false
+}
+
+func (a Afero) DirExists(path string) (bool, error) {
+	return DirExists(a.Fs, path)
+}
+
+// DirExists checks if a path exists and is a directory.
+func DirExists(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err == nil && fi.IsDir() {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func (a Afero) IsDir(path string) (bool, error) {
+	return IsDir(a.Fs, path)
+}
+
+// IsDir checks if a given path is a directory.
+func IsDir(fs Fs, path string) (bool, error) {
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	return fi.IsDir(), nil
+}
+
+func (a Afero) IsEmpty(path string) (bool, error) {
+	return IsEmpty(a.Fs, path)
+}
+
+// IsEmpty checks if a given file or directory is empty.
+func IsEmpty(fs Fs, path string) (bool, error) {
+	if b, _ := Exists(fs, path); !b {
+		return false, fmt.Errorf("%q path does not exist", path)
+	}
+	fi, err := fs.Stat(path)
+	if err != nil {
+		return false, err
+	}
+	if fi.IsDir() {
+		f, err := fs.Open(path)
+		if err != nil {
+			return false, err
+		}
+		defer f.Close()
+		list, err := f.Readdir(-1)
+		if err != nil {
+			return false, err
+		}
+		return len(list) == 0, nil
+	}
+	return fi.Size() == 0, nil
+}
+
+func (a Afero) Exists(path string) (bool, error) {
+	return Exists(a.Fs, path)
+}
+
+// Exists checks if a file or directory exists.
+func Exists(fs Fs, path string) (bool, error) {
+	_, err := fs.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
+	combinedPath := filepath.Join(basePathFs.path, relativePath)
+	if parent, ok := basePathFs.source.(*BasePathFs); ok {
+		return FullBaseFsPath(parent, combinedPath)
+	}
+
+	return combinedPath
+}
diff --git a/vendor/github.com/spf13/cast/LICENSE b/vendor/github.com/spf13/cast/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4527efb9c067932d21b145e55cc7a38d58b24c13
--- /dev/null
+++ b/vendor/github.com/spf13/cast/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..7ccf8930b56f4fe4a7fa96700ff300d21bd4bb48
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,38 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+	go test ./...
+
+test-race: ## Run tests with race detector
+	go test -race ./...
+
+fmt: ## Run gofmt linter
+	@for d in `go list` ; do \
+		if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+			echo "^ improperly formatted go files" && echo && exit 1; \
+		fi \
+	done
+
+lint: ## Run golint linter
+	@for d in `go list` ; do \
+		if [ "`golint $$d | tee /dev/stderr`" ]; then \
+			echo "^ golint errors!" && echo && exit 1; \
+		fi \
+	done
+
+vet: ## Run go vet linter
+	@if [ "`go vet | tee /dev/stderr`" ]; then \
+		echo "^ go vet errors!" && echo && exit 1; \
+	fi
+
+test-cover-html: ## Generate test coverage report
+	go test -coverprofile=coverage.out -covermode=count
+	go tool cover -func=coverage.out
+
+help:
+	@grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e6939397ddd82801882e891d5d728a4637052b98
--- /dev/null
+++ b/vendor/github.com/spf13/cast/README.md
@@ -0,0 +1,75 @@
+cast
+====
+[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast)
+[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
+
+Easy and safe casting from one type to another in Go
+
+Don’t Panic! ... Cast
+
+## What is Cast?
+
+Cast is a library to convert between different Go types in a consistent and easy way.
+
+Cast provides simple functions to easily convert a number to a string, an
+interface into a bool, etc. Cast does this intelligently when an obvious
+conversion is possible. It doesn’t attempt to guess what you meant; for
+example, you can only convert a string to an int when it is a string
+representation of an int, such as “8”. Cast was developed for use in
+[Hugo](http://hugo.spf13.com), a website engine which uses YAML, TOML or JSON
+for metadata.
+
+## Why use Cast?
+
+When working with dynamic data in Go you often need to cast or convert the data
+from one type into another. Cast goes beyond just using type assertion (though
+it uses that when possible) to provide a very straightforward and convenient
+library.
+
+If you are working with interfaces to handle things like dynamic content
+you’ll need an easy way to convert an interface into a given type. This
+is the library for you.
+
+If you are taking in data from YAML, TOML or JSON or other formats which lack
+full types, then Cast is the library for you.
+
+## Usage
+
+Cast provides a handful of To_____ methods. These methods will always return
+the desired type. **If input is provided that will not convert to that type, the
+0 or nil value for that type will be returned**.
+
+Cast also provides matching To_____E methods. These return the same result as
+the To_____ methods, plus an additional error which tells you whether the
+conversion succeeded. Using these methods you can tell the difference between
+an input that happened to equal the zero value and a conversion that failed
+and returned the zero value.
+
+The following examples are merely a sample of what is available. Please review
+the code for a complete set.
+
+### Example ‘ToString’:
+
+    cast.ToString("mayonegg")         // "mayonegg"
+    cast.ToString(8)                  // "8"
+    cast.ToString(8.31)               // "8.31"
+    cast.ToString([]byte("one time")) // "one time"
+    cast.ToString(nil)                // ""
+
+    var foo interface{} = "one more time"
+    cast.ToString(foo)                // "one more time"
+
+
+### Example ‘ToInt’:
+
+    cast.ToInt(8)                  // 8
+    cast.ToInt(8.31)               // 8
+    cast.ToInt("8")                // 8
+    cast.ToInt(true)               // 1
+    cast.ToInt(false)              // 0
+
+    var eight interface{} = 8
+    cast.ToInt(eight)              // 8
+    cast.ToInt(nil)                // 0
+
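+Where you need to tell a failed conversion apart from a genuine zero value,
+use the error-returning variants (a brief illustrative sketch):
+
+### Example ‘ToIntE’:
+
+    n, err := cast.ToIntE("8")    // 8, nil
+    n, err = cast.ToIntE("tree")  // 0, error
+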
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
new file mode 100644
index 0000000000000000000000000000000000000000..8b8c208befd2f954e754405e17bb608e0f6cd46d
--- /dev/null
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -0,0 +1,159 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Package cast provides easy and safe casting in Go.
+package cast
+
+import "time"
+
+// ToBool casts an interface to a bool type.
+func ToBool(i interface{}) bool {
+	v, _ := ToBoolE(i)
+	return v
+}
+
+// ToTime casts an interface to a time.Time type.
+func ToTime(i interface{}) time.Time {
+	v, _ := ToTimeE(i)
+	return v
+}
+
+// ToDuration casts an interface to a time.Duration type.
+func ToDuration(i interface{}) time.Duration {
+	v, _ := ToDurationE(i)
+	return v
+}
+
+// ToFloat64 casts an interface to a float64 type.
+func ToFloat64(i interface{}) float64 {
+	v, _ := ToFloat64E(i)
+	return v
+}
+
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+	v, _ := ToFloat32E(i)
+	return v
+}
+
+// ToInt64 casts an interface to an int64 type.
+func ToInt64(i interface{}) int64 {
+	v, _ := ToInt64E(i)
+	return v
+}
+
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+	v, _ := ToInt32E(i)
+	return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+	v, _ := ToInt16E(i)
+	return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+	v, _ := ToInt8E(i)
+	return v
+}
+
+// ToInt casts an interface to an int type.
+func ToInt(i interface{}) int {
+	v, _ := ToIntE(i)
+	return v
+}
+
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+	v, _ := ToUintE(i)
+	return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+	v, _ := ToUint64E(i)
+	return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+	v, _ := ToUint32E(i)
+	return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+	v, _ := ToUint16E(i)
+	return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+	v, _ := ToUint8E(i)
+	return v
+}
+
+// ToString casts an interface to a string type.
+func ToString(i interface{}) string {
+	v, _ := ToStringE(i)
+	return v
+}
+
+// ToStringMapString casts an interface to a map[string]string type.
+func ToStringMapString(i interface{}) map[string]string {
+	v, _ := ToStringMapStringE(i)
+	return v
+}
+
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
+func ToStringMapStringSlice(i interface{}) map[string][]string {
+	v, _ := ToStringMapStringSliceE(i)
+	return v
+}
+
+// ToStringMapBool casts an interface to a map[string]bool type.
+func ToStringMapBool(i interface{}) map[string]bool {
+	v, _ := ToStringMapBoolE(i)
+	return v
+}
+
+// ToStringMap casts an interface to a map[string]interface{} type.
+func ToStringMap(i interface{}) map[string]interface{} {
+	v, _ := ToStringMapE(i)
+	return v
+}
+
+// ToSlice casts an interface to a []interface{} type.
+func ToSlice(i interface{}) []interface{} {
+	v, _ := ToSliceE(i)
+	return v
+}
+
+// ToBoolSlice casts an interface to a []bool type.
+func ToBoolSlice(i interface{}) []bool {
+	v, _ := ToBoolSliceE(i)
+	return v
+}
+
+// ToStringSlice casts an interface to a []string type.
+func ToStringSlice(i interface{}) []string {
+	v, _ := ToStringSliceE(i)
+	return v
+}
+
+// ToIntSlice casts an interface to a []int type.
+func ToIntSlice(i interface{}) []int {
+	v, _ := ToIntSliceE(i)
+	return v
+}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+	v, _ := ToDurationSliceE(i)
+	return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
new file mode 100644
index 0000000000000000000000000000000000000000..81511fe52d5ed7a696979950f3d8735681f55a55
--- /dev/null
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -0,0 +1,1146 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package cast
+
+import (
+	"errors"
+	"fmt"
+	"html/template"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
+func ToTimeE(i interface{}) (tim time.Time, err error) {
+	i = indirect(i)
+
+	switch v := i.(type) {
+	case time.Time:
+		return v, nil
+	case string:
+		return StringToDate(v)
+	case int:
+		return time.Unix(int64(v), 0), nil
+	case int64:
+		return time.Unix(v, 0), nil
+	case int32:
+		return time.Unix(int64(v), 0), nil
+	case uint:
+		return time.Unix(int64(v), 0), nil
+	case uint64:
+		return time.Unix(int64(v), 0), nil
+	case uint32:
+		return time.Unix(int64(v), 0), nil
+	default:
+		return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
+	}
+}
+
+// ToDurationE casts an interface to a time.Duration type.
+func ToDurationE(i interface{}) (d time.Duration, err error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case time.Duration:
+		return s, nil
+	case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
+		d = time.Duration(ToInt64(s))
+		return
+	case float32, float64:
+		d = time.Duration(ToFloat64(s))
+		return
+	case string:
+		if strings.ContainsAny(s, "nsuµmh") {
+			d, err = time.ParseDuration(s)
+		} else {
+			d, err = time.ParseDuration(s + "ns")
+		}
+		return
+	default:
+		err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
+		return
+	}
+}
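+
+// For example (illustrative): a bare numeric string is treated as nanoseconds,
+// so ToDuration("100") yields 100*time.Nanosecond, while ToDuration("100ms")
+// parses as 100*time.Millisecond.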
+
+// ToBoolE casts an interface to a bool type.
+func ToBoolE(i interface{}) (bool, error) {
+	i = indirect(i)
+
+	switch b := i.(type) {
+	case bool:
+		return b, nil
+	case nil:
+		return false, nil
+	case int:
+		if b != 0 {
+			return true, nil
+		}
+		return false, nil
+	case string:
+		return strconv.ParseBool(b)
+	default:
+		return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
+	}
+}
+
+// ToFloat64E casts an interface to a float64 type.
+func ToFloat64E(i interface{}) (float64, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case float64:
+		return s, nil
+	case float32:
+		return float64(s), nil
+	case int:
+		return float64(s), nil
+	case int64:
+		return float64(s), nil
+	case int32:
+		return float64(s), nil
+	case int16:
+		return float64(s), nil
+	case int8:
+		return float64(s), nil
+	case uint:
+		return float64(s), nil
+	case uint64:
+		return float64(s), nil
+	case uint32:
+		return float64(s), nil
+	case uint16:
+		return float64(s), nil
+	case uint8:
+		return float64(s), nil
+	case string:
+		v, err := strconv.ParseFloat(s, 64)
+		if err == nil {
+			return v, nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+	}
+}
+
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case float64:
+		return float32(s), nil
+	case float32:
+		return s, nil
+	case int:
+		return float32(s), nil
+	case int64:
+		return float32(s), nil
+	case int32:
+		return float32(s), nil
+	case int16:
+		return float32(s), nil
+	case int8:
+		return float32(s), nil
+	case uint:
+		return float32(s), nil
+	case uint64:
+		return float32(s), nil
+	case uint32:
+		return float32(s), nil
+	case uint16:
+		return float32(s), nil
+	case uint8:
+		return float32(s), nil
+	case string:
+		v, err := strconv.ParseFloat(s, 32)
+		if err == nil {
+			return float32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+	}
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int64(s), nil
+	case int64:
+		return s, nil
+	case int32:
+		return int64(s), nil
+	case int16:
+		return int64(s), nil
+	case int8:
+		return int64(s), nil
+	case uint:
+		return int64(s), nil
+	case uint64:
+		return int64(s), nil
+	case uint32:
+		return int64(s), nil
+	case uint16:
+		return int64(s), nil
+	case uint8:
+		return int64(s), nil
+	case float64:
+		return int64(s), nil
+	case float32:
+		return int64(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return v, nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+	}
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int32(s), nil
+	case int64:
+		return int32(s), nil
+	case int32:
+		return s, nil
+	case int16:
+		return int32(s), nil
+	case int8:
+		return int32(s), nil
+	case uint:
+		return int32(s), nil
+	case uint64:
+		return int32(s), nil
+	case uint32:
+		return int32(s), nil
+	case uint16:
+		return int32(s), nil
+	case uint8:
+		return int32(s), nil
+	case float64:
+		return int32(s), nil
+	case float32:
+		return int32(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+	}
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int16(s), nil
+	case int64:
+		return int16(s), nil
+	case int32:
+		return int16(s), nil
+	case int16:
+		return s, nil
+	case int8:
+		return int16(s), nil
+	case uint:
+		return int16(s), nil
+	case uint64:
+		return int16(s), nil
+	case uint32:
+		return int16(s), nil
+	case uint16:
+		return int16(s), nil
+	case uint8:
+		return int16(s), nil
+	case float64:
+		return int16(s), nil
+	case float32:
+		return int16(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
+	}
+}
+
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return int8(s), nil
+	case int64:
+		return int8(s), nil
+	case int32:
+		return int8(s), nil
+	case int16:
+		return int8(s), nil
+	case int8:
+		return s, nil
+	case uint:
+		return int8(s), nil
+	case uint64:
+		return int8(s), nil
+	case uint32:
+		return int8(s), nil
+	case uint16:
+		return int8(s), nil
+	case uint8:
+		return int8(s), nil
+	case float64:
+		return int8(s), nil
+	case float32:
+		return int8(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int8(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+	}
+}
+
+// ToIntE casts an interface to an int type.
+func ToIntE(i interface{}) (int, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case int:
+		return s, nil
+	case int64:
+		return int(s), nil
+	case int32:
+		return int(s), nil
+	case int16:
+		return int(s), nil
+	case int8:
+		return int(s), nil
+	case uint:
+		return int(s), nil
+	case uint64:
+		return int(s), nil
+	case uint32:
+		return int(s), nil
+	case uint16:
+		return int(s), nil
+	case uint8:
+		return int(s), nil
+	case float64:
+		return int(s), nil
+	case float32:
+		return int(s), nil
+	case string:
+		v, err := strconv.ParseInt(s, 0, 0)
+		if err == nil {
+			return int(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+	}
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 0)
+		if err == nil {
+			return uint(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case uint:
+		return s, nil
+	case uint64:
+		return uint(s), nil
+	case uint32:
+		return uint(s), nil
+	case uint16:
+		return uint(s), nil
+	case uint8:
+		return uint(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+	}
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 64)
+		if err == nil {
+			return v, nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case uint:
+		return uint64(s), nil
+	case uint64:
+		return s, nil
+	case uint32:
+		return uint64(s), nil
+	case uint16:
+		return uint64(s), nil
+	case uint8:
+		return uint64(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint64(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+	}
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 32)
+		if err == nil {
+			return uint32(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case uint:
+		return uint32(s), nil
+	case uint64:
+		return uint32(s), nil
+	case uint32:
+		return s, nil
+	case uint16:
+		return uint32(s), nil
+	case uint8:
+		return uint32(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint32(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+	}
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 16)
+		if err == nil {
+			return uint16(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case uint:
+		return uint16(s), nil
+	case uint64:
+		return uint16(s), nil
+	case uint32:
+		return uint16(s), nil
+	case uint16:
+		return s, nil
+	case uint8:
+		return uint16(s), nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint16(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+	}
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i interface{}) (uint8, error) {
+	i = indirect(i)
+
+	switch s := i.(type) {
+	case string:
+		v, err := strconv.ParseUint(s, 0, 8)
+		if err == nil {
+			return uint8(v), nil
+		}
+		return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+	case int:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int16:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case int8:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case uint:
+		return uint8(s), nil
+	case uint64:
+		return uint8(s), nil
+	case uint32:
+		return uint8(s), nil
+	case uint16:
+		return uint8(s), nil
+	case uint8:
+		return s, nil
+	case float64:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case float32:
+		if s < 0 {
+			return 0, errNegativeNotAllowed
+		}
+		return uint8(s), nil
+	case bool:
+		if s {
+			return 1, nil
+		}
+		return 0, nil
+	case nil:
+		return 0, nil
+	default:
+		return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
+	}
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirect returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil).
+func indirect(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+	if t := reflect.TypeOf(a); t.Kind() != reflect.Ptr {
+		// Avoid creating a reflect.Value if it's not a pointer.
+		return a
+	}
+	v := reflect.ValueOf(a)
+	for v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+// From html/template/content.go
+// Copyright 2011 The Go Authors. All rights reserved.
+// indirectToStringerOrError returns the value, after dereferencing as many times
+// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
+// or error.
+func indirectToStringerOrError(a interface{}) interface{} {
+	if a == nil {
+		return nil
+	}
+
+	var errorType = reflect.TypeOf((*error)(nil)).Elem()
+	var fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+
+	v := reflect.ValueOf(a)
+	for !v.Type().Implements(fmtStringerType) && !v.Type().Implements(errorType) && v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v.Interface()
+}
+
+// ToStringE casts an interface to a string type.
+func ToStringE(i interface{}) (string, error) {
+	i = indirectToStringerOrError(i)
+
+	switch s := i.(type) {
+	case string:
+		return s, nil
+	case bool:
+		return strconv.FormatBool(s), nil
+	case float64:
+		return strconv.FormatFloat(s, 'f', -1, 64), nil
+	case float32:
+		return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
+	case int:
+		return strconv.Itoa(s), nil
+	case int64:
+		return strconv.FormatInt(s, 10), nil
+	case int32:
+		return strconv.Itoa(int(s)), nil
+	case int16:
+		return strconv.FormatInt(int64(s), 10), nil
+	case int8:
+		return strconv.FormatInt(int64(s), 10), nil
+	case uint:
+		return strconv.FormatUint(uint64(s), 10), nil
+	case uint64:
+		return strconv.FormatUint(s, 10), nil
+	case uint32:
+		return strconv.FormatUint(uint64(s), 10), nil
+	case uint16:
+		return strconv.FormatUint(uint64(s), 10), nil
+	case uint8:
+		return strconv.FormatUint(uint64(s), 10), nil
+	case []byte:
+		return string(s), nil
+	case template.HTML:
+		return string(s), nil
+	case template.URL:
+		return string(s), nil
+	case template.JS:
+		return string(s), nil
+	case template.CSS:
+		return string(s), nil
+	case template.HTMLAttr:
+		return string(s), nil
+	case nil:
+		return "", nil
+	case fmt.Stringer:
+		return s.String(), nil
+	case error:
+		return s.Error(), nil
+	default:
+		return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
+	}
+}
+
+// ToStringMapStringE casts an interface to a map[string]string type.
+func ToStringMapStringE(i interface{}) (map[string]string, error) {
+	var m = map[string]string{}
+
+	switch v := i.(type) {
+	case map[string]string:
+		return v, nil
+	case map[string]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToString(val)
+		}
+		return m, nil
+	case map[interface{}]string:
+		for k, val := range v {
+			m[ToString(k)] = ToString(val)
+		}
+		return m, nil
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToString(val)
+		}
+		return m, nil
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
+	}
+}
+
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
+func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
+	var m = map[string][]string{}
+
+	switch v := i.(type) {
+	case map[string][]string:
+		return v, nil
+	case map[string][]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[string]string:
+		for k, val := range v {
+			m[ToString(k)] = []string{val}
+		}
+	case map[string]interface{}:
+		for k, val := range v {
+			switch vt := val.(type) {
+			case []interface{}:
+				m[ToString(k)] = ToStringSlice(vt)
+			case []string:
+				m[ToString(k)] = vt
+			default:
+				m[ToString(k)] = []string{ToString(val)}
+			}
+		}
+		return m, nil
+	case map[interface{}][]string:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}]string:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}][]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToStringSlice(val)
+		}
+		return m, nil
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			key, err := ToStringE(k)
+			if err != nil {
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+			}
+			value, err := ToStringSliceE(val)
+			if err != nil {
+				return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+			}
+			m[key] = value
+		}
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
+	}
+	return m, nil
+}
+
+// ToStringMapBoolE casts an interface to a map[string]bool type.
+func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
+	var m = map[string]bool{}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToBool(val)
+		}
+		return m, nil
+	case map[string]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = ToBool(val)
+		}
+		return m, nil
+	case map[string]bool:
+		return v, nil
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
+	}
+}
+
+// ToStringMapE casts an interface to a map[string]interface{} type.
+func ToStringMapE(i interface{}) (map[string]interface{}, error) {
+	var m = map[string]interface{}{}
+
+	switch v := i.(type) {
+	case map[interface{}]interface{}:
+		for k, val := range v {
+			m[ToString(k)] = val
+		}
+		return m, nil
+	case map[string]interface{}:
+		return v, nil
+	default:
+		return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
+	}
+}
+
+// ToSliceE casts an interface to a []interface{} type.
+func ToSliceE(i interface{}) ([]interface{}, error) {
+	var s []interface{}
+
+	switch v := i.(type) {
+	case []interface{}:
+		return append(s, v...), nil
+	case []map[string]interface{}:
+		for _, u := range v {
+			s = append(s, u)
+		}
+		return s, nil
+	default:
+		return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
+	}
+}
+
+// ToBoolSliceE casts an interface to a []bool type.
+func ToBoolSliceE(i interface{}) ([]bool, error) {
+	if i == nil {
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+	}
+
+	switch v := i.(type) {
+	case []bool:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]bool, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToBoolE(s.Index(j).Interface())
+			if err != nil {
+				return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
+	}
+}
+
+// ToStringSliceE casts an interface to a []string type.
+func ToStringSliceE(i interface{}) ([]string, error) {
+	var a []string
+
+	switch v := i.(type) {
+	case []interface{}:
+		for _, u := range v {
+			a = append(a, ToString(u))
+		}
+		return a, nil
+	case []string:
+		return v, nil
+	case string:
+		return strings.Fields(v), nil
+	case interface{}:
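+		// any remaining non-nil value is converted via ToStringE and wrapped in a single-element slice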
+		str, err := ToStringE(v)
+		if err != nil {
+			return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+		}
+		return []string{str}, nil
+	default:
+		return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
+	}
+}
+
+// ToIntSliceE casts an interface to a []int type.
+func ToIntSliceE(i interface{}) ([]int, error) {
+	if i == nil {
+		return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+	}
+
+	switch v := i.(type) {
+	case []int:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]int, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToIntE(s.Index(j).Interface())
+			if err != nil {
+				return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
+	}
+}
+
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+	if i == nil {
+		return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+	}
+
+	switch v := i.(type) {
+	case []time.Duration:
+		return v, nil
+	}
+
+	kind := reflect.TypeOf(i).Kind()
+	switch kind {
+	case reflect.Slice, reflect.Array:
+		s := reflect.ValueOf(i)
+		a := make([]time.Duration, s.Len())
+		for j := 0; j < s.Len(); j++ {
+			val, err := ToDurationE(s.Index(j).Interface())
+			if err != nil {
+				return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+			}
+			a[j] = val
+		}
+		return a, nil
+	default:
+		return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+	}
+}
+
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats.  If no suitable format is found, an error is
+// returned.
+func StringToDate(s string) (time.Time, error) {
+	return parseDateWith(s, []string{
+		time.RFC3339,
+		"2006-01-02T15:04:05", // iso8601 without timezone
+		time.RFC1123Z,
+		time.RFC1123,
+		time.RFC822Z,
+		time.RFC822,
+		time.RFC850,
+		time.ANSIC,
+		time.UnixDate,
+		time.RubyDate,
+		"2006-01-02 15:04:05.999999999 -0700 MST", // Time.String()
+		"2006-01-02",
+		"02 Jan 2006",
+		"2006-01-02 15:04:05 -07:00",
+		"2006-01-02 15:04:05 -0700",
+		"2006-01-02 15:04:05Z07:00", // RFC3339 without T
+		"2006-01-02 15:04:05",
+		time.Kitchen,
+		time.Stamp,
+		time.StampMilli,
+		time.StampMicro,
+		time.StampNano,
+	})
+}
+
+func parseDateWith(s string, dates []string) (d time.Time, e error) {
+	for _, dateType := range dates {
+		if d, e = time.Parse(dateType, s); e == nil {
+			return
+		}
+	}
+	return d, fmt.Errorf("unable to parse date: %s", s)
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/LICENSE b/vendor/github.com/spf13/jwalterweatherman/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4527efb9c067932d21b145e55cc7a38d58b24c13
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/jwalterweatherman/README.md b/vendor/github.com/spf13/jwalterweatherman/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..350a9683db01b46ad31190d65fae94104c330106
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/README.md
@@ -0,0 +1,148 @@
+jWalterWeatherman
+=================
+
+Seamless printing to the terminal (stdout) and logging to an io.Writer
+(file) that’s as easy to use as fmt.Println.
+
+![and_that__s_why_you_always_leave_a_note_by_jonnyetc-d57q7um](https://cloud.githubusercontent.com/assets/173412/11002937/ccd01654-847d-11e5-828e-12ebaf582eaf.jpg)
+Graphic by [JonnyEtc](http://jonnyetc.deviantart.com/art/And-That-s-Why-You-Always-Leave-a-Note-315311422)
+
+JWW is primarily a wrapper around the excellent standard log library. It
+provides a few advantages over using the standard log library alone.
+
+1. Ready to go out of the box. 
+2. One library for both printing to the terminal and logging (to files).
+3. Really easy to log to either a temp file or a file you specify.
+
+
+I really wanted a very straightforward library that could seamlessly do
+the following things.
+
+1. Replace all the println, printf, etc. statements throughout my code with
+   something more useful
+2. Allow the user to easily control what levels are printed to stdout
+3. Allow the user to easily control what levels are logged
+4. Provide an easy mechanism (like fmt.Println) to print info to the user
+   which can be easily logged as well 
+5. Due to 2 & 3 provide easy verbose mode for output and logs
+6. Not have any unnecessary initialization cruft. Just use it.
+
+# Usage
+
+## Step 1. Use it
+Put calls throughout your source based on type of feedback.
+No initialization or setup needs to happen. Just start calling things.
+
+Available Loggers are:
+
+ * TRACE
+ * DEBUG
+ * INFO
+ * WARN
+ * ERROR
+ * CRITICAL
+ * FATAL
+
+Each of these is a logger based on the standard log library and follows the
+standard usage. E.g.
+
+```go
+    import (
+        jww "github.com/spf13/jwalterweatherman"
+    )
+
+    ...
+
+    if err != nil {
+
+        // This is a pretty serious error and the user should know about
+        // it. It will be printed to the terminal as well as logged under the
+        // default thresholds.
+
+        jww.ERROR.Println(err)
+    }
+
+    if err2 != nil {
+        // This error isn’t going to materially change the behavior of the
+        // application, but it’s something that may not be what the user
+        // expects. Under the default thresholds, Warn will be logged, but
+        // not printed to the terminal. 
+
+        jww.WARN.Println(err2)
+    }
+
+    // Information that’s relevant to what’s happening, but not very
+    // important for the user. Under the default thresholds this will be
+    // discarded.
+
+    jww.INFO.Printf("information %q", response)
+
+```
+
+NOTE: You can also use the library in a non-global setting by creating an instance of a Notepad:
+
+```go
+notepad = jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+notepad.WARN.Println("Some warning")
+```
+
+_Why 7 levels?_
+
+Maybe you think that 7 levels are too many for any application... and you
+are probably correct. Just because there are seven levels doesn’t mean
+you should use all 7. Pick the right set for your needs.
+Remember they only have to mean something to your project.
+
+## Step 2. Optionally configure JWW
+
+Under the default thresholds:
+
+ * Debug, Trace & Info go to /dev/null
+ * Warn and above are logged (when a log file/io.Writer is provided)
+ * Error and above are printed to the terminal (stdout)
+
+### Changing the thresholds
+
+The threshold can be changed at any time, but will only affect calls that
+execute after the change was made.
+
+This is very useful if your application has a verbose mode. Of course you
+can decide what verbose means to you or even have multiple levels of
+verbosity.
+
+
+```go
+    import (
+        jww "github.com/spf13/jwalterweatherman"
+    )
+
+    if Verbose {
+        jww.SetLogThreshold(jww.LevelTrace)
+        jww.SetStdoutThreshold(jww.LevelInfo)
+    }
+```
+
+Note that JWW's own internal output uses log levels as well, so set the log
+level before making any other calls if you want to see what it's up to.
+
+
+### Setting a log file
+
+JWW can log to any `io.Writer`:
+
+
+```go
+    jww.SetLogOutput(customWriter)
+```
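+
+For example, a minimal sketch that logs to a temporary file (error handling omitted; the "jww" file name prefix is arbitrary):
+
+```go
+    logFile, _ := ioutil.TempFile("", "jww")
+    jww.SetLogOutput(logFile)
+```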
+
+
+# More information
+
+This is an early release. I’ve been using it for a while and this is the
+third interface I’ve tried. I like this one pretty well, but no guarantees
+that it won’t change a bit.
+
+I wrote this for use in [hugo](http://hugo.spf13.com). If you are looking
+for a static website engine that’s super fast please check out Hugo.
diff --git a/vendor/github.com/spf13/jwalterweatherman/default_notepad.go b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
new file mode 100644
index 0000000000000000000000000000000000000000..bcb763403c2c30a3d4af4ac9901018e4f670d3b3
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/default_notepad.go
@@ -0,0 +1,113 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+var (
+	TRACE    *log.Logger
+	DEBUG    *log.Logger
+	INFO     *log.Logger
+	WARN     *log.Logger
+	ERROR    *log.Logger
+	CRITICAL *log.Logger
+	FATAL    *log.Logger
+
+	LOG      *log.Logger
+	FEEDBACK *Feedback
+
+	defaultNotepad *Notepad
+)
+
+func reloadDefaultNotepad() {
+	TRACE = defaultNotepad.TRACE
+	DEBUG = defaultNotepad.DEBUG
+	INFO = defaultNotepad.INFO
+	WARN = defaultNotepad.WARN
+	ERROR = defaultNotepad.ERROR
+	CRITICAL = defaultNotepad.CRITICAL
+	FATAL = defaultNotepad.FATAL
+
+	LOG = defaultNotepad.LOG
+	FEEDBACK = defaultNotepad.FEEDBACK
+}
+
+func init() {
+	defaultNotepad = NewNotepad(LevelError, LevelWarn, os.Stdout, ioutil.Discard, "", log.Ldate|log.Ltime)
+	reloadDefaultNotepad()
+}
+
+// SetLogThreshold sets the log threshold for the default notepad. Warn by default.
+func SetLogThreshold(threshold Threshold) {
+	defaultNotepad.SetLogThreshold(threshold)
+	reloadDefaultNotepad()
+}
+
+// SetLogOutput sets the log output for the default notepad. Discarded by default.
+func SetLogOutput(handle io.Writer) {
+	defaultNotepad.SetLogOutput(handle)
+	reloadDefaultNotepad()
+}
+
+// SetStdoutThreshold sets the standard output threshold for the default notepad.
+// Error by default.
+func SetStdoutThreshold(threshold Threshold) {
+	defaultNotepad.SetStdoutThreshold(threshold)
+	reloadDefaultNotepad()
+}
+
+// SetPrefix sets the prefix for the default logger. Empty by default.
+func SetPrefix(prefix string) {
+	defaultNotepad.SetPrefix(prefix)
+	reloadDefaultNotepad()
+}
+
+// SetFlags sets the flags for the default logger. "log.Ldate | log.Ltime" by default.
+func SetFlags(flags int) {
+	defaultNotepad.SetFlags(flags)
+	reloadDefaultNotepad()
+}
+
+// LogThreshold returns the current global log threshold.
+func LogThreshold() Threshold {
+	return defaultNotepad.logThreshold
+}
+
+// StdoutThreshold returns the current global output threshold.
+func StdoutThreshold() Threshold {
+	return defaultNotepad.stdoutThreshold
+}
+
+// GetLogThreshold returns the defined Threshold for the log logger.
+func GetLogThreshold() Threshold {
+	return defaultNotepad.GetLogThreshold()
+}
+
+// GetStdoutThreshold returns the Threshold for the stdout logger.
+func GetStdoutThreshold() Threshold {
+	return defaultNotepad.GetStdoutThreshold()
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func LogCountForLevel(l Threshold) uint64 {
+	return defaultNotepad.LogCountForLevel(l)
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// greater than or equal to a given threshold.
+func LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+	return defaultNotepad.LogCountForLevelsGreaterThanorEqualTo(threshold)
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func ResetLogCounters() {
+	defaultNotepad.ResetLogCounters()
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
new file mode 100644
index 0000000000000000000000000000000000000000..11423ac41e12321c7a6556d529439115fb5eb067
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -0,0 +1,55 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"sync/atomic"
+)
+
+type logCounter struct {
+	counter uint64
+}
+
+func (c *logCounter) incr() {
+	atomic.AddUint64(&c.counter, 1)
+}
+
+func (c *logCounter) resetCounter() {
+	atomic.StoreUint64(&c.counter, 0)
+}
+
+func (c *logCounter) getCount() uint64 {
+	return atomic.LoadUint64(&c.counter)
+}
+
+func (c *logCounter) Write(p []byte) (n int, err error) {
+	c.incr()
+	return len(p), nil
+}
+
+// LogCountForLevel returns the number of log invocations for a given threshold.
+func (n *Notepad) LogCountForLevel(l Threshold) uint64 {
+	return n.logCounters[l].getCount()
+}
+
+// LogCountForLevelsGreaterThanorEqualTo returns the number of log invocations
+// greater than or equal to a given threshold.
+func (n *Notepad) LogCountForLevelsGreaterThanorEqualTo(threshold Threshold) uint64 {
+	var cnt uint64
+
+	for i := int(threshold); i < len(n.logCounters); i++ {
+		cnt += n.LogCountForLevel(Threshold(i))
+	}
+
+	return cnt
+}
+
+// ResetLogCounters resets the invocation counters for all levels.
+func (n *Notepad) ResetLogCounters() {
+	for _, np := range n.logCounters {
+		np.resetCounter()
+	}
+}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae5aaf7114a1896dadae1b9482ddb3f81f24ef1c
--- /dev/null
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -0,0 +1,194 @@
+// Copyright © 2016 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package jwalterweatherman
+
+import (
+	"fmt"
+	"io"
+	"log"
+)
+
+type Threshold int
+
+func (t Threshold) String() string {
+	return prefixes[t]
+}
+
+const (
+	LevelTrace Threshold = iota
+	LevelDebug
+	LevelInfo
+	LevelWarn
+	LevelError
+	LevelCritical
+	LevelFatal
+)
+
+var prefixes = map[Threshold]string{
+	LevelTrace:    "TRACE",
+	LevelDebug:    "DEBUG",
+	LevelInfo:     "INFO",
+	LevelWarn:     "WARN",
+	LevelError:    "ERROR",
+	LevelCritical: "CRITICAL",
+	LevelFatal:    "FATAL",
+}
+
+// Notepad is where you leave a note!
+type Notepad struct {
+	TRACE    *log.Logger
+	DEBUG    *log.Logger
+	INFO     *log.Logger
+	WARN     *log.Logger
+	ERROR    *log.Logger
+	CRITICAL *log.Logger
+	FATAL    *log.Logger
+
+	LOG      *log.Logger
+	FEEDBACK *Feedback
+
+	loggers         [7]**log.Logger
+	logHandle       io.Writer
+	outHandle       io.Writer
+	logThreshold    Threshold
+	stdoutThreshold Threshold
+	prefix          string
+	flags           int
+
+	// One per Threshold
+	logCounters [7]*logCounter
+}
+
+// NewNotepad creates a new notepad.
+func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad {
+	n := &Notepad{}
+
+	n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
+	n.outHandle = outHandle
+	n.logHandle = logHandle
+	n.stdoutThreshold = outThreshold
+	n.logThreshold = logThreshold
+
+	if len(prefix) != 0 {
+		n.prefix = "[" + prefix + "] "
+	} else {
+		n.prefix = ""
+	}
+
+	n.flags = flags
+
+	n.LOG = log.New(n.logHandle,
+		"LOG:   ",
+		n.flags)
+	n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
+
+	n.init()
+	return n
+}
+
+// init creates the loggers for each level depending on the notepad thresholds.
+func (n *Notepad) init() {
+	logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
+
+	for t, logger := range n.loggers {
+		threshold := Threshold(t)
+		counter := &logCounter{}
+		n.logCounters[t] = counter
+		prefix := n.prefix + threshold.String() + " "
+
+		switch {
+		case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
+			*logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags)
+
+		case threshold >= n.logThreshold:
+			*logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags)
+
+		case threshold >= n.stdoutThreshold:
+			*logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags)
+
+		default:
+			// counter doesn't care about prefix and flags, so don't use them
+			// for performance.
+			*logger = log.New(counter, "", 0)
+		}
+	}
+}
+
+// SetLogThreshold changes the threshold above which messages are written to the
+// log file.
+func (n *Notepad) SetLogThreshold(threshold Threshold) {
+	n.logThreshold = threshold
+	n.init()
+}
+
+// SetLogOutput changes the file where log messages are written.
+func (n *Notepad) SetLogOutput(handle io.Writer) {
+	n.logHandle = handle
+	n.init()
+}
+
+// GetLogThreshold returns the defined Threshold for the log logger.
+func (n *Notepad) GetLogThreshold() Threshold {
+	return n.logThreshold
+}
+
+// SetStdoutThreshold changes the threshold above which messages are written to the
+// standard output.
+func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
+	n.stdoutThreshold = threshold
+	n.init()
+}
+
+// GetStdoutThreshold returns the Threshold for the stdout logger.
+func (n *Notepad) GetStdoutThreshold() Threshold {
+	return n.stdoutThreshold
+}
+
+// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
+func (n *Notepad) SetPrefix(prefix string) {
+	if len(prefix) != 0 {
+		n.prefix = "[" + prefix + "] "
+	} else {
+		n.prefix = ""
+	}
+	n.init()
+}
+
+// SetFlags chooses which flags the logger will display (after prefix and message
+// level). See the standard log package for more information.
+func (n *Notepad) SetFlags(flags int) {
+	n.flags = flags
+	n.init()
+}
+
+// Feedback writes plainly to the outHandle while
+// logging with the standard extra information (date, file, etc).
+type Feedback struct {
+	out *log.Logger
+	log *log.Logger
+}
+
+func (fb *Feedback) Println(v ...interface{}) {
+	fb.output(fmt.Sprintln(v...))
+}
+
+func (fb *Feedback) Printf(format string, v ...interface{}) {
+	fb.output(fmt.Sprintf(format, v...))
+}
+
+func (fb *Feedback) Print(v ...interface{}) {
+	fb.output(fmt.Sprint(v...))
+}
+
+func (fb *Feedback) output(s string) {
+	if fb.out != nil {
+		fb.out.Output(2, s)
+	}
+	if fb.log != nil {
+		fb.log.Output(2, s)
+	}
+}
diff --git a/vendor/github.com/spf13/viper/LICENSE b/vendor/github.com/spf13/viper/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4527efb9c067932d21b145e55cc7a38d58b24c13
--- /dev/null
+++ b/vendor/github.com/spf13/viper/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Steve Francia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..64bf47435847d3b092866baf49dfa55329027616
--- /dev/null
+++ b/vendor/github.com/spf13/viper/README.md
@@ -0,0 +1,643 @@
+![viper logo](https://cloud.githubusercontent.com/assets/173412/10886745/998df88a-8151-11e5-9448-4736db51020d.png)
+
+Go configuration with fangs!
+
+Many Go projects are built using Viper including:
+
+* [Hugo](http://gohugo.io)
+* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
+* [Imgur’s Incus](https://github.com/Imgur/incus)
+* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
+* [Docker Notary](https://github.com/docker/Notary)
+* [BloomApi](https://www.bloomapi.com/)
+* [doctl](https://github.com/digitalocean/doctl)
+* [Clairctl](https://github.com/jgsqware/clairctl)
+
+[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper)
+
+
+## What is Viper?
+
+Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed
+to work within an application, and can handle all types of configuration needs
+and formats. It supports:
+
+* setting defaults
+* reading from JSON, TOML, YAML, HCL, and Java properties config files
+* live watching and re-reading of config files (optional)
+* reading from environment variables
+* reading from remote config systems (etcd or Consul), and watching changes
+* reading from command line flags
+* reading from buffer
+* setting explicit values
+
+Viper can be thought of as a registry for all of your application’s
+configuration needs.
+
+## Why Viper?
+
+When building a modern application, you don’t want to worry about
+configuration file formats; you want to focus on building awesome software.
+Viper is here to help with that.
+
+Viper does the following for you:
+
+1. Find, load, and unmarshal a configuration file in JSON, TOML, YAML, HCL, or Java properties formats.
+2. Provide a mechanism to set default values for your different
+   configuration options.
+3. Provide a mechanism to set override values for options specified through
+   command line flags.
+4. Provide an alias system to easily rename parameters without breaking existing
+   code.
+5. Make it easy to tell the difference between when a user has explicitly
+   provided a value via command line or config file and when it merely equals the default.
+
+Viper uses the following precedence order. Each item takes precedence over the
+item below it:
+
+ * explicit call to Set
+ * flag
+ * env
+ * config
+ * key/value store
+ * default
+
+Viper configuration keys are case insensitive.
+
+## Putting Values into Viper
+
+### Establishing Defaults
+
+A good configuration system will support default values. A default value is not
+required for a key, but it’s useful in the event that a key hasn’t been set via
+config file, environment variable, remote configuration or flag.
+
+Examples:
+
+```go
+viper.SetDefault("ContentDir", "content")
+viper.SetDefault("LayoutDir", "layouts")
+viper.SetDefault("Taxonomies", map[string]string{"tag": "tags", "category": "categories"})
+```
+
+### Reading Config Files
+
+Viper requires minimal configuration so it knows where to look for config files.
+Viper supports JSON, TOML, YAML, HCL, and Java Properties files. Viper can search multiple paths, but
+currently a single Viper instance only supports a single configuration file.
+Viper does not default to any configuration search paths, leaving that decision
+to the application.
+
+Here is an example of how to use Viper to search for and read a configuration file.
+None of the specific paths are required, but at least one path should be provided
+where a configuration file is expected.
+
+```go
+viper.SetConfigName("config") // name of config file (without extension)
+viper.AddConfigPath("/etc/appname/")   // path to look for the config file in
+viper.AddConfigPath("$HOME/.appname")  // call multiple times to add many search paths
+viper.AddConfigPath(".")               // optionally look for config in the working directory
+err := viper.ReadInConfig() // Find and read the config file
+if err != nil { // Handle errors reading the config file
+	panic(fmt.Errorf("Fatal error config file: %s \n", err))
+}
+```
+
+### Watching and re-reading config files
+
+Viper supports having your application live-read a config file while running.
+
+Gone are the days of needing to restart a server to have a config take effect;
+Viper-powered applications can read an update to a config file while running and
+not miss a beat.
+
+Simply tell the viper instance to `WatchConfig()`.
+Optionally you can provide a function for Viper to run each time a change occurs.
+
+**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
+
+```go
+viper.WatchConfig()
+viper.OnConfigChange(func(e fsnotify.Event) {
+	fmt.Println("Config file changed:", e.Name)
+})
+```
+
+### Reading Config from io.Reader
+
+Viper predefines many configuration sources such as files, environment
+variables, flags, and remote K/V store, but you are not bound to them. You can
+also implement your own required configuration source and feed it to viper.
+
+```go
+viper.SetConfigType("yaml") // or viper.SetConfigType("YAML")
+
+// any approach to require this configuration into your program.
+var yamlExample = []byte(`
+Hacker: true
+name: steve
+hobbies:
+- skateboarding
+- snowboarding
+- go
+clothing:
+  jacket: leather
+  trousers: denim
+age: 35
+eyes : brown
+beard: true
+`)
+
+viper.ReadConfig(bytes.NewBuffer(yamlExample))
+
+viper.Get("name") // this would be "steve"
+```
+
+### Setting Overrides
+
+These could be from a command line flag, or from your own application logic.
+
+```go
+viper.Set("Verbose", true)
+viper.Set("LogFile", LogFile)
+```
+
+### Registering and Using Aliases
+
+Aliases permit a single value to be referenced by multiple keys.
+
+```go
+viper.RegisterAlias("loud", "Verbose")
+
+viper.Set("verbose", true) // same result as next line
+viper.Set("loud", true)   // same result as prior line
+
+viper.GetBool("loud") // true
+viper.GetBool("verbose") // true
+```
+
+### Working with Environment Variables
+
+Viper has full support for environment variables. This enables 12 factor
+applications out of the box. Four methods exist to aid working
+with ENV:
+
+ * `AutomaticEnv()`
+ * `BindEnv(string...) : error`
+ * `SetEnvPrefix(string)`
+ * `SetEnvKeyReplacer(string...) *strings.Replacer`
+
+_When working with ENV variables, it’s important to recognize that Viper
+treats ENV variables as case sensitive._
+
+Viper provides a mechanism to try to ensure that ENV variables are unique. By
+using `SetEnvPrefix`, you can tell Viper to add a prefix while reading from
+the environment variables. Both `BindEnv` and `AutomaticEnv` will use this
+prefix.
+
+`BindEnv` takes one or two parameters. The first parameter is the key name, the
+second is the name of the environment variable. The name of the environment
+variable is case sensitive. If the ENV variable name is not provided, then
+Viper will automatically assume that the key name matches the ENV variable name,
+but the ENV variable is IN ALL CAPS. When you explicitly provide the ENV
+variable name, it **does not** automatically add the prefix.
+
+One important thing to recognize when working with ENV variables is that the
+value will be read each time it is accessed. Viper does not fix the value when
+`BindEnv` is called.
+
+`AutomaticEnv` is a powerful helper especially when combined with
+`SetEnvPrefix`. When called, Viper will check for an environment variable any
+time a `viper.Get` request is made. It applies the following rule: it will
+check for an environment variable with a name matching the key uppercased and
+prefixed with the `EnvPrefix` if set.
+
+`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env
+keys to an extent. This is useful if you want to use `-` or something in your
+`Get()` calls, but want your environment variables to use `_` delimiters. An
+example of using it can be found in `viper_test.go`.
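+
+For instance, a minimal sketch (the `app` prefix and `APP_LOG_DIR` variable are hypothetical):
+
+```go
+viper.SetEnvPrefix("app") // will be uppercased automatically
+viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_"))
+viper.AutomaticEnv()
+
+os.Setenv("APP_LOG_DIR", "/var/log/app") // typically done outside of the app
+
+viper.Get("log-dir") // "/var/log/app"; "APP_LOG-DIR" is rewritten to "APP_LOG_DIR"
+```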
+
+#### Env example
+
+```go
+SetEnvPrefix("spf") // will be uppercased automatically
+BindEnv("id")
+
+os.Setenv("SPF_ID", "13") // typically done outside of the app
+
+id := Get("id") // 13
+```
+
+### Working with Flags
+
+Viper has the ability to bind to flags. Specifically, Viper supports `Pflags`
+as used in the [Cobra](https://github.com/spf13/cobra) library.
+
+Like `BindEnv`, the value is not set when the binding method is called, but when
+it is accessed. This means you can bind as early as you want, even in an
+`init()` function.
+
+For individual flags, the `BindPFlag()` method provides this functionality.
+
+Example:
+
+```go
+serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+```
+
+You can also bind an existing set of pflags (pflag.FlagSet):
+
+Example:
+
+```go
+pflag.Int("flagname", 1234, "help message for flagname")
+
+pflag.Parse()
+viper.BindPFlags(pflag.CommandLine)
+
+i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
+```
+
+The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
+the use of other packages that use the [flag](https://golang.org/pkg/flag/)
+package from the standard library. The pflag package can handle the flags
+defined for the flag package by importing these flags. This is accomplished
+by calling a convenience function provided by the pflag package called
+AddGoFlagSet().
+
+Example:
+
+```go
+package main
+
+import (
+	"flag"
+	"github.com/spf13/pflag"
+)
+
+func main() {
+
+	// using standard library "flag" package
+	flag.Int("flagname", 1234, "help message for flagname")
+
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	pflag.Parse()
+	viper.BindPFlags(pflag.CommandLine)
+
+	i := viper.GetInt("flagname") // retrieve value from viper
+
+	...
+}
+```
+
+#### Flag interfaces
+
+Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
+
+`FlagValue` represents a single flag. This is a very simple example of how to implement this interface:
+
+```go
+type myFlag struct {}
+func (f myFlag) HasChanged() bool { return false }
+func (f myFlag) Name() string { return "my-flag-name" }
+func (f myFlag) ValueString() string { return "my-flag-value" }
+func (f myFlag) ValueType() string { return "string" }
+```
+
+Once your flag implements this interface, you can simply tell Viper to bind it:
+
+```go
+viper.BindFlagValue("my-flag-name", myFlag{})
+```
+
+`FlagValueSet` represents a group of flags. This is a very simple example of how to implement this interface:
+
+```go
+type myFlagSet struct {
+	flags []myFlag
+}
+
+func (f myFlagSet) VisitAll(fn func(FlagValue)) {
+	for _, flag := range f.flags {
+		fn(flag)
+	}
+}
+```
+
+Once your flag set implements this interface, you can simply tell Viper to bind it:
+
+```go
+fSet := myFlagSet{
+	flags: []myFlag{myFlag{}, myFlag{}},
+}
+viper.BindFlagValues("my-flags", fSet)
+```
+
+### Remote Key/Value Store Support
+
+To enable remote support in Viper, do a blank import of the `viper/remote`
+package:
+
+`import _ "github.com/spf13/viper/remote"`
+
+Viper will read a config string (as JSON, TOML, YAML or HCL) retrieved from a path
+in a Key/Value store such as etcd or Consul.  These values take precedence over
+default values, but are overridden by configuration values retrieved from disk,
+flags, or environment variables.
+
+Viper uses [crypt](https://github.com/xordataexchange/crypt) to retrieve
+configuration from the K/V store, which means that you can store your
+configuration values encrypted and have them automatically decrypted if you have
+the correct gpg keyring.  Encryption is optional.
+
+You can use remote configuration in conjunction with local configuration, or
+independently of it.
+
+`crypt` has a command-line helper that you can use to put configurations in your
+K/V store. `crypt` defaults to etcd on http://127.0.0.1:4001.
+
+```bash
+$ go get github.com/xordataexchange/crypt/bin/crypt
+$ crypt set -plaintext /config/hugo.json /Users/hugo/settings/config.json
+```
+
+Confirm that your value was set:
+
+```bash
+$ crypt get -plaintext /config/hugo.json
+```
+
+See the `crypt` documentation for examples of how to set encrypted values, or
+how to use Consul.
+
+### Remote Key/Value Store Example - Unencrypted
+
+```go
+viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+err := viper.ReadRemoteConfig()
+```
+
+### Remote Key/Value Store Example - Encrypted
+
+```go
+viper.AddSecureRemoteProvider("etcd","http://127.0.0.1:4001","/config/hugo.json","/etc/secrets/mykeyring.gpg")
+viper.SetConfigType("json") // because there is no file extension in a stream of bytes,  supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+err := viper.ReadRemoteConfig()
+```
+
+### Watching Changes in etcd - Unencrypted
+
+```go
+// alternatively, you can create a new viper instance.
+var runtime_viper = viper.New()
+
+runtime_viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001", "/config/hugo.yml")
+runtime_viper.SetConfigType("yaml") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop"
+
+// read from remote config the first time.
+err := runtime_viper.ReadRemoteConfig()
+
+// unmarshal config
+runtime_viper.Unmarshal(&runtime_conf)
+
+// open a goroutine to watch remote changes forever
+go func(){
+	for {
+	    time.Sleep(time.Second * 5) // delay after each request
+
+	    // currently, only tested with etcd support
+	    err := runtime_viper.WatchRemoteConfig()
+	    if err != nil {
+	        log.Printf("unable to read remote config: %v", err)
+	        continue
+	    }
+
+	    // unmarshal new config into our runtime config struct. you can also use channel
+	    // to implement a signal to notify the system of the changes
+	    runtime_viper.Unmarshal(&runtime_conf)
+	}
+}()
+```
+
+## Getting Values From Viper
+
+In Viper, there are a few ways to get a value depending on the value’s type.
+The following functions and methods exist:
+
+ * `Get(key string) : interface{}`
+ * `GetBool(key string) : bool`
+ * `GetFloat64(key string) : float64`
+ * `GetInt(key string) : int`
+ * `GetString(key string) : string`
+ * `GetStringMap(key string) : map[string]interface{}`
+ * `GetStringMapString(key string) : map[string]string`
+ * `GetStringSlice(key string) : []string`
+ * `GetTime(key string) : time.Time`
+ * `GetDuration(key string) : time.Duration`
+ * `IsSet(key string) : bool`
+
+One important thing to recognize is that each Get function will return a zero
+value if the key is not found. To check if a given key exists, the `IsSet()`
+method has been provided.
+
+Example:
+```go
+viper.GetString("logfile") // case-insensitive Setting & Getting
+if viper.GetBool("verbose") {
+    fmt.Println("verbose enabled")
+}
+```
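+
+A minimal sketch using `IsSet` (the `logfile` key is hypothetical):
+
+```go
+if !viper.IsSet("logfile") {
+    fmt.Println("no logfile configured")
+}
+```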
+### Accessing nested keys
+
+The accessor methods also accept formatted paths to deeply nested keys. For
+example, if the following JSON file is loaded:
+
+```json
+{
+    "host": {
+        "address": "localhost",
+        "port": 5799
+    },
+    "datastore": {
+        "metric": {
+            "host": "127.0.0.1",
+            "port": 3099
+        },
+        "warehouse": {
+            "host": "198.0.0.1",
+            "port": 2112
+        }
+    }
+}
+
+```
+
+Viper can access a nested field by passing a `.` delimited path of keys:
+
+```go
+GetString("datastore.metric.host") // (returns "127.0.0.1")
+```
+
+This obeys the precedence rules established above; the search for the path
+will cascade through the remaining configuration registries until found.
+
+For example, given this configuration file, both `datastore.metric.host` and
+`datastore.metric.port` are already defined (and may be overridden). If in addition
+`datastore.metric.protocol` was defined in the defaults, Viper would also find it.
+
+However, if `datastore.metric` was overridden (by a flag, an environment variable,
+the `Set()` method, …) with an immediate value, then all sub-keys of
+`datastore.metric` become undefined; they are “shadowed” by the higher-priority
+configuration level.
+
+Lastly, if there exists a key that matches the delimited key path, its value
+will be returned instead. E.g.
+
+```json
+{
+    "datastore.metric.host": "0.0.0.0",
+    "host": {
+        "address": "localhost",
+        "port": 5799
+    },
+    "datastore": {
+        "metric": {
+            "host": "127.0.0.1",
+            "port": 3099
+        },
+        "warehouse": {
+            "host": "198.0.0.1",
+            "port": 2112
+        }
+    }
+}
+```
+
+```go
+GetString("datastore.metric.host") // returns "0.0.0.0"
+```
+
+### Extract sub-tree
+
+Extract sub-tree from Viper.
+
+For example, `viper` represents:
+
+```yaml
+app:
+  cache1:
+    max-items: 100
+    item-size: 64
+  cache2:
+    max-items: 200
+    item-size: 80
+```
+
+After executing:
+
+```go
+subv := viper.Sub("app.cache1")
+```
+
+`subv` represents:
+
+```yaml
+max-items: 100
+item-size: 64
+```
+
+Suppose we have:
+
+```go
+func NewCache(cfg *Viper) *Cache {...}
+```
+
+which creates a cache based on config information formatted as `subv`.
+Now it’s easy to create these 2 caches separately as:
+
+```go
+cfg1 := viper.Sub("app.cache1")
+cache1 := NewCache(cfg1)
+
+cfg2 := viper.Sub("app.cache2")
+cache2 := NewCache(cfg2)
+```
+
+### Unmarshaling
+
+You also have the option of Unmarshaling all or a specific value to a struct, map,
+etc.
+
+There are two methods to do this:
+
+ * `Unmarshal(rawVal interface{}) : error`
+ * `UnmarshalKey(key string, rawVal interface{}) : error`
+
+Example:
+
+```go
+type config struct {
+	Port int
+	Name string
+	PathMap string `mapstructure:"path_map"`
+}
+
+var C config
+
+err := Unmarshal(&C)
+if err != nil {
+	t.Fatalf("unable to decode into struct, %v", err)
+}
+```
+
+## Viper or Vipers?
+
+Viper comes ready to use out of the box. There is no configuration or
+initialization needed to begin using Viper. Since most applications will want
+to use a single central repository for their configuration, the viper package
+provides this. It is similar to a singleton.
+
+All of the examples above demonstrate using Viper in its
+singleton-style approach.
+
+### Working with multiple vipers
+
+You can also create many different vipers for use in your application. Each will
+have its own unique set of configurations and values. Each can read from a
+different config file, key value store, etc. All of the functions that the viper
+package supports are mirrored as methods on a Viper instance.
+
+Example:
+
+```go
+x := viper.New()
+y := viper.New()
+
+x.SetDefault("ContentDir", "content")
+y.SetDefault("ContentDir", "foobar")
+
+//...
+```
+
+When working with multiple vipers, it is up to the user to keep track of the
+different vipers.
+
+## Q & A
+
+Q: Why not INI files?
+
+A: Ini files are pretty awful. There’s no standard format, and they are hard to
+validate. Viper is designed to work with JSON, TOML or YAML files. If someone
+really wants to add this feature, I’d be happy to merge it. It’s easy to specify
+which formats your application will permit.
+
+Q: Why is it called “Viper”?
+
+A: Viper is designed to be a [companion](http://en.wikipedia.org/wiki/Viper_(G.I._Joe))
+to [Cobra](https://github.com/spf13/cobra). While both can operate completely
+independently, together they make a powerful pair to handle much of your
+application foundation needs.
+
+Q: Why is it called “Cobra”?
+
+A: Is there a better name for a [commander](http://en.wikipedia.org/wiki/Cobra_Commander)?
diff --git a/vendor/github.com/spf13/viper/flags.go b/vendor/github.com/spf13/viper/flags.go
new file mode 100644
index 0000000000000000000000000000000000000000..dd32f4e1c262bf0ba63411dd261ac8353b2df660
--- /dev/null
+++ b/vendor/github.com/spf13/viper/flags.go
@@ -0,0 +1,57 @@
+package viper
+
+import "github.com/spf13/pflag"
+
+// FlagValueSet is an interface that users can implement
+// to bind a set of flags to viper.
+type FlagValueSet interface {
+	VisitAll(fn func(FlagValue))
+}
+
+// FlagValue is an interface that users can implement
+// to bind different flags to viper.
+type FlagValue interface {
+	HasChanged() bool
+	Name() string
+	ValueString() string
+	ValueType() string
+}
+
+// pflagValueSet is a wrapper around *pflag.FlagSet
+// that implements FlagValueSet.
+type pflagValueSet struct {
+	flags *pflag.FlagSet
+}
+
+// VisitAll iterates over all *pflag.Flag inside the *pflag.FlagSet.
+func (p pflagValueSet) VisitAll(fn func(flag FlagValue)) {
+	p.flags.VisitAll(func(flag *pflag.Flag) {
+		fn(pflagValue{flag})
+	})
+}
+
+// pflagValue is a wrapper around *pflag.Flag
+// that implements FlagValue.
+type pflagValue struct {
+	flag *pflag.Flag
+}
+
+// HasChanged returns whether the flag has changed.
+func (p pflagValue) HasChanged() bool {
+	return p.flag.Changed
+}
+
+// Name returns the name of the flag.
+func (p pflagValue) Name() string {
+	return p.flag.Name
+}
+
+// ValueString returns the value of the flag as a string.
+func (p pflagValue) ValueString() string {
+	return p.flag.Value.String()
+}
+
+// ValueType returns the type of the flag as a string.
+func (p pflagValue) ValueType() string {
+	return p.flag.Value.Type()
+}
diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go
new file mode 100644
index 0000000000000000000000000000000000000000..c784dad40a5a5928e6ba6dbeece87561ebf8c45c
--- /dev/null
+++ b/vendor/github.com/spf13/viper/util.go
@@ -0,0 +1,283 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+package viper
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"github.com/hashicorp/hcl"
+	"github.com/magiconair/properties"
+	toml "github.com/pelletier/go-toml"
+	"github.com/spf13/afero"
+	"github.com/spf13/cast"
+	jww "github.com/spf13/jwalterweatherman"
+	"gopkg.in/yaml.v2"
+)
+
+// ConfigParseError denotes failing to parse configuration file.
+type ConfigParseError struct {
+	err error
+}
+
+// Error returns the formatted configuration error.
+func (pe ConfigParseError) Error() string {
+	return fmt.Sprintf("While parsing config: %s", pe.err.Error())
+}
+
+// toCaseInsensitiveValue checks if the value is a map;
+// if so, it creates a copy and lower-cases the keys recursively.
+func toCaseInsensitiveValue(value interface{}) interface{} {
+	switch v := value.(type) {
+	case map[interface{}]interface{}:
+		value = copyAndInsensitiviseMap(cast.ToStringMap(v))
+	case map[string]interface{}:
+		value = copyAndInsensitiviseMap(v)
+	}
+
+	return value
+}
+
+// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of
+// any map it makes case insensitive.
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} {
+	nm := make(map[string]interface{})
+
+	for key, val := range m {
+		lkey := strings.ToLower(key)
+		switch v := val.(type) {
+		case map[interface{}]interface{}:
+			nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v))
+		case map[string]interface{}:
+			nm[lkey] = copyAndInsensitiviseMap(v)
+		default:
+			nm[lkey] = v
+		}
+	}
+
+	return nm
+}
+
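+// insensitiviseMap lower-cases all keys of m in place, recursing into nested maps.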
+func insensitiviseMap(m map[string]interface{}) {
+	for key, val := range m {
+		switch val.(type) {
+		case map[interface{}]interface{}:
+			// nested map: cast and recursively insensitivise
+			val = cast.ToStringMap(val)
+			insensitiviseMap(val.(map[string]interface{}))
+		case map[string]interface{}:
+			// nested map: recursively insensitivise
+			insensitiviseMap(val.(map[string]interface{}))
+		}
+
+		lower := strings.ToLower(key)
+		if key != lower {
+			// remove old key (not lower-cased)
+			delete(m, key)
+		}
+		// update map
+		m[lower] = val
+	}
+}
+
+func absPathify(inPath string) string {
+	jww.INFO.Println("Trying to resolve absolute path to", inPath)
+
+	if strings.HasPrefix(inPath, "$HOME") {
+		inPath = userHomeDir() + inPath[5:]
+	}
+
+	if strings.HasPrefix(inPath, "$") {
+		end := strings.Index(inPath, string(os.PathSeparator))
+		inPath = os.Getenv(inPath[1:end]) + inPath[end:]
+	}
+
+	if filepath.IsAbs(inPath) {
+		return filepath.Clean(inPath)
+	}
+
+	p, err := filepath.Abs(inPath)
+	if err == nil {
+		return filepath.Clean(p)
+	}
+
+	jww.ERROR.Println("Couldn't discover absolute path")
+	jww.ERROR.Println(err)
+	return ""
+}
+
+// exists checks whether a file or directory exists.
+func exists(fs afero.Fs, path string) (bool, error) {
+	_, err := fs.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
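+// stringInSlice reports whether list contains a.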
+func stringInSlice(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+func userHomeDir() string {
+	if runtime.GOOS == "windows" {
+		home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+		if home == "" {
+			home = os.Getenv("USERPROFILE")
+		}
+		return home
+	}
+	return os.Getenv("HOME")
+}
+
+func unmarshallConfigReader(in io.Reader, c map[string]interface{}, configType string) error {
+	buf := new(bytes.Buffer)
+	buf.ReadFrom(in)
+
+	switch strings.ToLower(configType) {
+	case "yaml", "yml":
+		if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil {
+			return ConfigParseError{err}
+		}
+
+	case "json":
+		if err := json.Unmarshal(buf.Bytes(), &c); err != nil {
+			return ConfigParseError{err}
+		}
+
+	case "hcl":
+		obj, err := hcl.Parse(string(buf.Bytes()))
+		if err != nil {
+			return ConfigParseError{err}
+		}
+		if err = hcl.DecodeObject(&c, obj); err != nil {
+			return ConfigParseError{err}
+		}
+
+	case "toml":
+		tree, err := toml.LoadReader(buf)
+		if err != nil {
+			return ConfigParseError{err}
+		}
+		tmap := tree.ToMap()
+		for k, v := range tmap {
+			c[k] = v
+		}
+
+	case "properties", "props", "prop":
+		var p *properties.Properties
+		var err error
+		if p, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil {
+			return ConfigParseError{err}
+		}
+		for _, key := range p.Keys() {
+			value, _ := p.Get(key)
+			// recursively build nested maps
+			path := strings.Split(key, ".")
+			lastKey := strings.ToLower(path[len(path)-1])
+			deepestMap := deepSearch(c, path[0:len(path)-1])
+			// set innermost value
+			deepestMap[lastKey] = value
+		}
+	}
+
+	insensitiviseMap(c)
+	return nil
+}
+
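+// safeMul returns a*b, or 0 if the multiplication overflows.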
+func safeMul(a, b uint) uint {
+	c := a * b
+	if a > 1 && b > 1 && c/b != a {
+		return 0
+	}
+	return c
+}
+
+// parseSizeInBytes converts strings like 1GB or 12 mb into an unsigned integer number of bytes
+func parseSizeInBytes(sizeStr string) uint {
+	sizeStr = strings.TrimSpace(sizeStr)
+	lastChar := len(sizeStr) - 1
+	multiplier := uint(1)
+
+	if lastChar > 0 {
+		if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+			if lastChar > 1 {
+				switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+				case 'k':
+					multiplier = 1 << 10
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'm':
+					multiplier = 1 << 20
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'g':
+					multiplier = 1 << 30
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				default:
+					multiplier = 1
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar])
+				}
+			}
+		}
+	}
+
+	size := cast.ToInt(sizeStr)
+	if size < 0 {
+		size = 0
+	}
+
+	return safeMul(uint(size), multiplier)
+}
+
+// deepSearch scans deep maps, following the key indexes listed in the
+// sequence "path".
+// The last value is expected to be another map, and is returned.
+//
+// In case intermediate keys do not exist, or map to a non-map value,
+// a new map is created and inserted, and the search continues from there:
+// the initial map "m" may be modified!
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} {
+	for _, k := range path {
+		m2, ok := m[k]
+		if !ok {
+			// intermediate key does not exist
+			// => create it and continue from there
+			m3 := make(map[string]interface{})
+			m[k] = m3
+			m = m3
+			continue
+		}
+		m3, ok := m2.(map[string]interface{})
+		if !ok {
+			// intermediate key is a value
+			// => replace with a new map
+			m3 = make(map[string]interface{})
+			m[k] = m3
+		}
+		// continue search from here
+		m = m3
+	}
+	return m
+}
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
new file mode 100644
index 0000000000000000000000000000000000000000..64f006a39880991f6c413e3f257dfa5b968ac42a
--- /dev/null
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -0,0 +1,1574 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+// Viper is an application configuration system.
+// It believes that applications can be configured a variety of ways
+// via flags, ENVIRONMENT variables, configuration files retrieved
+// from the file system, or a remote key/value store.
+
+// Each item takes precedence over the item below it:
+
+// overrides
+// flag
+// env
+// config
+// key/value store
+// default
+
+package viper
+
+import (
+	"bytes"
+	"encoding/csv"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/fsnotify/fsnotify"
+	"github.com/mitchellh/mapstructure"
+	"github.com/spf13/afero"
+	"github.com/spf13/cast"
+	jww "github.com/spf13/jwalterweatherman"
+	"github.com/spf13/pflag"
+)
+
+var v *Viper
+
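+// RemoteResponse carries the value (or error) received while watching a remote config source.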
+type RemoteResponse struct {
+	Value []byte
+	Error error
+}
+
+func init() {
+	v = New()
+}
+
+type remoteConfigFactory interface {
+	Get(rp RemoteProvider) (io.Reader, error)
+	Watch(rp RemoteProvider) (io.Reader, error)
+	WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
+}
+
+// RemoteConfig is optional, see the remote package
+var RemoteConfig remoteConfigFactory
+
+// UnsupportedConfigError denotes encountering an unsupported
+// configuration filetype.
+type UnsupportedConfigError string
+
+// Error returns the formatted configuration error.
+func (str UnsupportedConfigError) Error() string {
+	return fmt.Sprintf("Unsupported Config Type %q", string(str))
+}
+
+// UnsupportedRemoteProviderError denotes encountering an unsupported remote
+// provider. Currently only etcd and Consul are supported.
+type UnsupportedRemoteProviderError string
+
+// Error returns the formatted remote provider error.
+func (str UnsupportedRemoteProviderError) Error() string {
+	return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str))
+}
+
+// RemoteConfigError denotes encountering an error while trying to
+// pull the configuration from the remote provider.
+type RemoteConfigError string
+
+// Error returns the formatted remote provider error.
+func (rce RemoteConfigError) Error() string {
+	return fmt.Sprintf("Remote Configurations Error: %s", string(rce))
+}
+
+// ConfigFileNotFoundError denotes failing to find configuration file.
+type ConfigFileNotFoundError struct {
+	name, locations string
+}
+
+// Error returns the formatted configuration error.
+func (fnfe ConfigFileNotFoundError) Error() string {
+	return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations)
+}
+
+// Viper is a prioritized configuration registry. It
+// maintains a set of configuration sources, fetches
+// values to populate those, and provides them according
+// to the source's priority.
+// The priority of the sources is the following:
+// 1. overrides
+// 2. flags
+// 3. env. variables
+// 4. config file
+// 5. key/value store
+// 6. defaults
+//
+// For example, if values from the following sources were loaded:
+//
+//  Defaults : {
+//  	"secret": "",
+//  	"user": "default",
+//  	"endpoint": "https://localhost"
+//  }
+//  Config : {
+//  	"user": "root"
+//  	"secret": "defaultsecret"
+//  }
+//  Env : {
+//  	"secret": "somesecretkey"
+//  }
+//
+// The resulting config will have the following values:
+//
+//	{
+//		"secret": "somesecretkey",
+//		"user": "root",
+//		"endpoint": "https://localhost"
+//	}
+type Viper struct {
+	// Delimiter that separates a list of keys
+	// used to access a nested value in one go
+	keyDelim string
+
+	// A set of paths to look for the config file in
+	configPaths []string
+
+	// The filesystem to read config from.
+	fs afero.Fs
+
+	// A set of remote providers to search for the configuration
+	remoteProviders []*defaultRemoteProvider
+
+	// Name of file to look for inside the path
+	configName string
+	configFile string
+	configType string
+	envPrefix  string
+
+	automaticEnvApplied bool
+	envKeyReplacer      *strings.Replacer
+
+	config         map[string]interface{}
+	override       map[string]interface{}
+	defaults       map[string]interface{}
+	kvstore        map[string]interface{}
+	pflags         map[string]FlagValue
+	env            map[string]string
+	aliases        map[string]string
+	typeByDefValue bool
+
+	onConfigChange func(fsnotify.Event)
+}
+
+// New returns an initialized Viper instance.
+func New() *Viper {
+	v := new(Viper)
+	v.keyDelim = "."
+	v.configName = "config"
+	v.fs = afero.NewOsFs()
+	v.config = make(map[string]interface{})
+	v.override = make(map[string]interface{})
+	v.defaults = make(map[string]interface{})
+	v.kvstore = make(map[string]interface{})
+	v.pflags = make(map[string]FlagValue)
+	v.env = make(map[string]string)
+	v.aliases = make(map[string]string)
+	v.typeByDefValue = false
+
+	return v
+}
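+
+// Illustrative sketch (assumed caller code, not part of the library): the
+// precedence documented on the Viper type means an explicit Set() beats a
+// default for the same key.
+//
+//	v := viper.New()
+//	v.SetDefault("user", "default")
+//	v.Set("user", "root")
+//	v.Get("user") // "root": the override register has the highest priority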
+
+// Reset is intended for testing; it resets all state to default settings.
+// It is in the public interface for the viper package so applications
+// can use it in their testing as well.
+func Reset() {
+	v = New()
+	SupportedExts = []string{"json", "toml", "yaml", "yml", "hcl"}
+	SupportedRemoteProviders = []string{"etcd", "consul"}
+}
+
+type defaultRemoteProvider struct {
+	provider      string
+	endpoint      string
+	path          string
+	secretKeyring string
+}
+
+func (rp defaultRemoteProvider) Provider() string {
+	return rp.provider
+}
+
+func (rp defaultRemoteProvider) Endpoint() string {
+	return rp.endpoint
+}
+
+func (rp defaultRemoteProvider) Path() string {
+	return rp.path
+}
+
+func (rp defaultRemoteProvider) SecretKeyring() string {
+	return rp.secretKeyring
+}
+
+// RemoteProvider stores the configuration necessary
+// to connect to a remote key/value store.
+// An optional secretKeyring can be provided to decrypt encrypted values.
+type RemoteProvider interface {
+	Provider() string
+	Endpoint() string
+	Path() string
+	SecretKeyring() string
+}
+
+// SupportedExts are universally supported extensions.
+var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"}
+
+// SupportedRemoteProviders are universally supported remote providers.
+var SupportedRemoteProviders = []string{"etcd", "consul"}
+
+// OnConfigChange registers a callback to be invoked when the config file
+// changes (see WatchConfig).
+func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) }
+func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) {
+	v.onConfigChange = run
+}
+
+// WatchConfig watches the config file in a background goroutine and reloads
+// it whenever the file is written or recreated.
+func WatchConfig() { v.WatchConfig() }
+func (v *Viper) WatchConfig() {
+	go func() {
+		watcher, err := fsnotify.NewWatcher()
+		if err != nil {
+			log.Fatal(err)
+		}
+		defer watcher.Close()
+
+		// we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way
+		filename, err := v.getConfigFile()
+		if err != nil {
+			log.Println("error:", err)
+			return
+		}
+
+		configFile := filepath.Clean(filename)
+		configDir, _ := filepath.Split(configFile)
+
+		done := make(chan bool)
+		go func() {
+			for {
+				select {
+				case event := <-watcher.Events:
+					// we only care about the config file
+					if filepath.Clean(event.Name) == configFile {
+						if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create {
+							err := v.ReadInConfig()
+							if err != nil {
+								log.Println("error:", err)
+							}
+							v.onConfigChange(event)
+						}
+					}
+				case err := <-watcher.Errors:
+					log.Println("error:", err)
+				}
+			}
+		}()
+
+		watcher.Add(configDir)
+		<-done
+	}()
+}
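+
+// Illustrative usage sketch (assumed caller code; the config path is
+// hypothetical): register a callback before starting the watcher so reloads
+// are observed.
+//
+//	viper.SetConfigFile("/etc/myapp/config.yaml")
+//	viper.OnConfigChange(func(e fsnotify.Event) {
+//		log.Println("config changed:", e.Name)
+//	})
+//	viper.WatchConfig()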
+
+// SetConfigFile explicitly defines the path, name and extension of the config file.
+// Viper will use this and not check any of the config paths.
+func SetConfigFile(in string) { v.SetConfigFile(in) }
+func (v *Viper) SetConfigFile(in string) {
+	if in != "" {
+		v.configFile = in
+	}
+}
+
+// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
+// E.g. if your prefix is "spf", the env registry will look for env
+// variables that start with "SPF_".
+func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
+func (v *Viper) SetEnvPrefix(in string) {
+	if in != "" {
+		v.envPrefix = in
+	}
+}
+
+func (v *Viper) mergeWithEnvPrefix(in string) string {
+	if v.envPrefix != "" {
+		return strings.ToUpper(v.envPrefix + "_" + in)
+	}
+
+	return strings.ToUpper(in)
+}
+
+// TODO: should the getEnv logic be moved into find()? Key rewriting could be
+// generalized to many things, e.g. Get("someKey") -> some_key
+// (camel case to snake case for JSON keys, perhaps).
+
+// getEnv is a wrapper around os.Getenv which replaces characters in the original
+// key. This allows env vars which have different keys than the config object
+// keys.
+func (v *Viper) getEnv(key string) string {
+	if v.envKeyReplacer != nil {
+		key = v.envKeyReplacer.Replace(key)
+	}
+	return os.Getenv(key)
+}
+
+// ConfigFileUsed returns the file used to populate the config registry.
+func ConfigFileUsed() string            { return v.ConfigFileUsed() }
+func (v *Viper) ConfigFileUsed() string { return v.configFile }
+
+// AddConfigPath adds a path for Viper to search for the config file in.
+// Can be called multiple times to define multiple search paths.
+func AddConfigPath(in string) { v.AddConfigPath(in) }
+func (v *Viper) AddConfigPath(in string) {
+	if in != "" {
+		absin := absPathify(in)
+		jww.INFO.Println("adding", absin, "to paths to search")
+		if !stringInSlice(absin, v.configPaths) {
+			v.configPaths = append(v.configPaths, absin)
+		}
+	}
+}
+
+// AddRemoteProvider adds a remote configuration source.
+// Remote Providers are searched in the order they are added.
+// provider is a string value, "etcd" or "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port, consul requires ip:port.
+// path is the path in the k/v store to retrieve configuration from.
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp".
+func AddRemoteProvider(provider, endpoint, path string) error {
+	return v.AddRemoteProvider(provider, endpoint, path)
+}
+func (v *Viper) AddRemoteProvider(provider, endpoint, path string) error {
+	if !stringInSlice(provider, SupportedRemoteProviders) {
+		return UnsupportedRemoteProviderError(provider)
+	}
+	if provider != "" && endpoint != "" {
+		jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+		rp := &defaultRemoteProvider{
+			endpoint: endpoint,
+			provider: provider,
+			path:     path,
+		}
+		if !v.providerPathExists(rp) {
+			v.remoteProviders = append(v.remoteProviders, rp)
+		}
+	}
+	return nil
+}
+
+// AddSecureRemoteProvider adds a remote configuration source.
+// Secure Remote Providers are searched in the order they are added.
+// provider is a string value, "etcd" or "consul" are currently supported.
+// endpoint is the url. etcd requires http://ip:port, consul requires ip:port.
+// secretkeyring is the filepath to your openpgp secret keyring, e.g. /etc/secrets/myring.gpg.
+// path is the path in the k/v store to retrieve configuration from.
+// To retrieve a config file called myapp.json from /configs/myapp.json
+// you should set path to /configs and set config name (SetConfigName()) to
+// "myapp".
+// Secure Remote Providers are implemented with github.com/xordataexchange/crypt.
+func AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+	return v.AddSecureRemoteProvider(provider, endpoint, path, secretkeyring)
+}
+
+func (v *Viper) AddSecureRemoteProvider(provider, endpoint, path, secretkeyring string) error {
+	if !stringInSlice(provider, SupportedRemoteProviders) {
+		return UnsupportedRemoteProviderError(provider)
+	}
+	if provider != "" && endpoint != "" {
+		jww.INFO.Printf("adding %s:%s to remote provider list", provider, endpoint)
+		rp := &defaultRemoteProvider{
+			endpoint:      endpoint,
+			provider:      provider,
+			path:          path,
+			secretKeyring: secretkeyring,
+		}
+		if !v.providerPathExists(rp) {
+			v.remoteProviders = append(v.remoteProviders, rp)
+		}
+	}
+	return nil
+}
+
+func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool {
+	for _, y := range v.remoteProviders {
+		if reflect.DeepEqual(y, p) {
+			return true
+		}
+	}
+	return false
+}
+
+// searchMap recursively searches for a value for path in source map.
+// Returns nil if not found.
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} {
+	if len(path) == 0 {
+		return source
+	}
+
+	next, ok := source[path[0]]
+	if ok {
+		// Fast path
+		if len(path) == 1 {
+			return next
+		}
+
+		// Nested case
+		switch next.(type) {
+		case map[interface{}]interface{}:
+			return v.searchMap(cast.ToStringMap(next), path[1:])
+		case map[string]interface{}:
+			// Type assertion is safe here since it is only reached
+			// if the type of `next` is the same as the type being asserted
+			return v.searchMap(next.(map[string]interface{}), path[1:])
+		default:
+			// got a value but nested key expected, return "nil" for not found
+			return nil
+		}
+	}
+	return nil
+}
+
+// searchMapWithPathPrefixes recursively searches for a value for path in source map.
+//
+// While searchMap() considers each path element as a single map key, this
+// function searches for, and prioritizes, merged path elements.
+// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar"
+// is also defined, this latter value is returned for path ["foo", "bar"].
+//
+// This should be useful only at config level (other maps may not contain dots
+// in their keys).
+//
+// Note: This assumes that the path entries and map keys are lower cased.
+func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} {
+	if len(path) == 0 {
+		return source
+	}
+
+	// search for path prefixes, starting from the longest one
+	for i := len(path); i > 0; i-- {
+		prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim))
+
+		next, ok := source[prefixKey]
+		if ok {
+			// Fast path
+			if i == len(path) {
+				return next
+			}
+
+			// Nested case
+			var val interface{}
+			switch next.(type) {
+			case map[interface{}]interface{}:
+				val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:])
+			case map[string]interface{}:
+				// Type assertion is safe here since it is only reached
+				// if the type of `next` is the same as the type being asserted
+				val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:])
+			default:
+				// got a value but nested key expected, do nothing and look for next prefix
+			}
+			if val != nil {
+				return val
+			}
+		}
+	}
+
+	// not found
+	return nil
+}
+
+// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere
+// on its path in the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+//       "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string {
+	var parentVal interface{}
+	for i := 1; i < len(path); i++ {
+		parentVal = v.searchMap(m, path[0:i])
+		if parentVal == nil {
+			// not found, no need to add more path elements
+			return ""
+		}
+		switch parentVal.(type) {
+		case map[interface{}]interface{}:
+			continue
+		case map[string]interface{}:
+			continue
+		default:
+			// parentVal is a regular value which shadows "path"
+			return strings.Join(path[0:i], v.keyDelim)
+		}
+	}
+	return ""
+}
+
+// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere
+// in a sub-path of the map.
+// e.g., if "foo.bar" has a value in the given map, it “shadows”
+//       "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string {
+	// unify input map
+	var m map[string]interface{}
+	switch mi.(type) {
+	case map[string]string, map[string]FlagValue:
+		m = cast.ToStringMap(mi)
+	default:
+		return ""
+	}
+
+	// scan paths
+	var parentKey string
+	for i := 1; i < len(path); i++ {
+		parentKey = strings.Join(path[0:i], v.keyDelim)
+		if _, ok := m[parentKey]; ok {
+			return parentKey
+		}
+	}
+	return ""
+}
+
+// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere
+// in the environment, when automatic env is on.
+// e.g., if "foo.bar" has a value in the environment, it “shadows”
+//       "foo.bar.baz" in a lower-priority map
+func (v *Viper) isPathShadowedInAutoEnv(path []string) string {
+	var parentKey string
+	var val string
+	for i := 1; i < len(path); i++ {
+		parentKey = strings.Join(path[0:i], v.keyDelim)
+		if val = v.getEnv(v.mergeWithEnvPrefix(parentKey)); val != "" {
+			return parentKey
+		}
+	}
+	return ""
+}
+
+// SetTypeByDefaultValue enables or disables the inference of a key value's
+// type when the Get function is used based upon a key's default value as
+// opposed to the value returned based on the normal fetch logic.
+//
+// For example, if a key has a default value of []string{} and the same key
+// is set via an environment variable to "a b c", a call to the Get function
+// would return a string slice for the key if the key's type is inferred by
+// the default value and the Get function would return:
+//
+//   []string {"a", "b", "c"}
+//
+// Otherwise the Get function would return:
+//
+//   "a b c"
+func SetTypeByDefaultValue(enable bool) { v.SetTypeByDefaultValue(enable) }
+func (v *Viper) SetTypeByDefaultValue(enable bool) {
+	v.typeByDefValue = enable
+}
+
+// GetViper gets the global Viper instance.
+func GetViper() *Viper {
+	return v
+}
+
+// Get can retrieve any value given the key to use.
+// Get is case-insensitive for a key.
+// Get has the behavior of returning the value associated with the first
+// place from where it is set. Viper will check in the following order:
+// override, flag, env, config file, key/value store, default
+//
+// Get returns an interface. For a specific value use one of the Get____ methods.
+func Get(key string) interface{} { return v.Get(key) }
+func (v *Viper) Get(key string) interface{} {
+	lcaseKey := strings.ToLower(key)
+	val := v.find(lcaseKey)
+	if val == nil {
+		return nil
+	}
+
+	if v.typeByDefValue {
+		// TODO(bep) this branch isn't covered by a single test.
+		valType := val
+		path := strings.Split(lcaseKey, v.keyDelim)
+		defVal := v.searchMap(v.defaults, path)
+		if defVal != nil {
+			valType = defVal
+		}
+
+		switch valType.(type) {
+		case bool:
+			return cast.ToBool(val)
+		case string:
+			return cast.ToString(val)
+		case int64, int32, int16, int8, int:
+			return cast.ToInt(val)
+		case float64, float32:
+			return cast.ToFloat64(val)
+		case time.Time:
+			return cast.ToTime(val)
+		case time.Duration:
+			return cast.ToDuration(val)
+		case []string:
+			return cast.ToStringSlice(val)
+		}
+	}
+
+	return val
+}
+
+// Sub returns a new Viper instance representing a sub-tree of this instance.
+// Sub is case-insensitive for a key.
+func Sub(key string) *Viper { return v.Sub(key) }
+func (v *Viper) Sub(key string) *Viper {
+	subv := New()
+	data := v.Get(key)
+	if data == nil {
+		return nil
+	}
+
+	if reflect.TypeOf(data).Kind() == reflect.Map {
+		subv.config = cast.ToStringMap(data)
+		return subv
+	}
+	return nil
+}
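+
+// Illustrative sketch (hypothetical keys): given a config containing a
+// nested "cache" section, Sub returns a Viper scoped to that sub-tree, or
+// nil if the key is missing or not a map.
+//
+//	// config: cache.size = 100, cache.ttl = "5m"
+//	cache := v.Sub("cache")
+//	if cache != nil {
++//		cache.GetInt("size") // 100
+//	}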
+
+// GetString returns the value associated with the key as a string.
+func GetString(key string) string { return v.GetString(key) }
+func (v *Viper) GetString(key string) string {
+	return cast.ToString(v.Get(key))
+}
+
+// GetBool returns the value associated with the key as a boolean.
+func GetBool(key string) bool { return v.GetBool(key) }
+func (v *Viper) GetBool(key string) bool {
+	return cast.ToBool(v.Get(key))
+}
+
+// GetInt returns the value associated with the key as an integer.
+func GetInt(key string) int { return v.GetInt(key) }
+func (v *Viper) GetInt(key string) int {
+	return cast.ToInt(v.Get(key))
+}
+
+// GetInt64 returns the value associated with the key as an integer.
+func GetInt64(key string) int64 { return v.GetInt64(key) }
+func (v *Viper) GetInt64(key string) int64 {
+	return cast.ToInt64(v.Get(key))
+}
+
+// GetFloat64 returns the value associated with the key as a float64.
+func GetFloat64(key string) float64 { return v.GetFloat64(key) }
+func (v *Viper) GetFloat64(key string) float64 {
+	return cast.ToFloat64(v.Get(key))
+}
+
+// GetTime returns the value associated with the key as time.
+func GetTime(key string) time.Time { return v.GetTime(key) }
+func (v *Viper) GetTime(key string) time.Time {
+	return cast.ToTime(v.Get(key))
+}
+
+// GetDuration returns the value associated with the key as a duration.
+func GetDuration(key string) time.Duration { return v.GetDuration(key) }
+func (v *Viper) GetDuration(key string) time.Duration {
+	return cast.ToDuration(v.Get(key))
+}
+
+// GetStringSlice returns the value associated with the key as a slice of strings.
+func GetStringSlice(key string) []string { return v.GetStringSlice(key) }
+func (v *Viper) GetStringSlice(key string) []string {
+	return cast.ToStringSlice(v.Get(key))
+}
+
+// GetStringMap returns the value associated with the key as a map of interfaces.
+func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) }
+func (v *Viper) GetStringMap(key string) map[string]interface{} {
+	return cast.ToStringMap(v.Get(key))
+}
+
+// GetStringMapString returns the value associated with the key as a map of strings.
+func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) }
+func (v *Viper) GetStringMapString(key string) map[string]string {
+	return cast.ToStringMapString(v.Get(key))
+}
+
+// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings.
+func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) }
+func (v *Viper) GetStringMapStringSlice(key string) map[string][]string {
+	return cast.ToStringMapStringSlice(v.Get(key))
+}
+
+// GetSizeInBytes returns the size of the value associated with the given key
+// in bytes.
+func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) }
+func (v *Viper) GetSizeInBytes(key string) uint {
+	sizeStr := cast.ToString(v.Get(key))
+	return parseSizeInBytes(sizeStr)
+}
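+
+// Illustrative sketch, assuming the "kb"/"mb"/"gb" size-string suffixes
+// understood by parseSizeInBytes (the key is hypothetical):
+//
+//	v.Set("cachesize", "5mb")
+//	v.GetSizeInBytes("cachesize") // 5 * 1024 * 1024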
+
+// UnmarshalKey takes a single key and unmarshals it into a Struct.
+func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) }
+func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error {
+	err := decode(v.Get(key), defaultDecoderConfig(rawVal))
+
+	if err != nil {
+		return err
+	}
+
+	v.insensitiviseMaps()
+
+	return nil
+}
+
+// Unmarshal unmarshals the config into a Struct. Make sure that the tags
+// on the fields of the structure are properly set.
+func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) }
+func (v *Viper) Unmarshal(rawVal interface{}) error {
+	err := decode(v.AllSettings(), defaultDecoderConfig(rawVal))
+
+	if err != nil {
+		return err
+	}
+
+	v.insensitiviseMaps()
+
+	return nil
+}
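+
+// Illustrative sketch (hypothetical struct): decode all settings into a
+// mapstructure-tagged struct.
+//
+//	type serverConfig struct {
+//		Port    int           `mapstructure:"port"`
+//		Timeout time.Duration `mapstructure:"timeout"`
+//	}
+//	var c serverConfig
+//	if err := v.Unmarshal(&c); err != nil {
+//		log.Fatal(err)
+//	}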
+
+// defaultDecoderConfig returns the default mapstructure.DecoderConfig with
+// support for time.Duration values & string slices.
+func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig {
+	return &mapstructure.DecoderConfig{
+		Metadata:         nil,
+		Result:           output,
+		WeaklyTypedInput: true,
+		DecodeHook: mapstructure.ComposeDecodeHookFunc(
+			mapstructure.StringToTimeDurationHookFunc(),
+			mapstructure.StringToSliceHookFunc(","),
+		),
+	}
+}
+
+// decode is a wrapper around mapstructure.Decode that mimics the WeakDecode functionality.
+func decode(input interface{}, config *mapstructure.DecoderConfig) error {
+	decoder, err := mapstructure.NewDecoder(config)
+	if err != nil {
+		return err
+	}
+	return decoder.Decode(input)
+}
+
+// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent
+// in the destination struct.
+func (v *Viper) UnmarshalExact(rawVal interface{}) error {
+	config := defaultDecoderConfig(rawVal)
+	config.ErrorUnused = true
+
+	err := decode(v.AllSettings(), config)
+
+	if err != nil {
+		return err
+	}
+
+	v.insensitiviseMaps()
+
+	return nil
+}
+
+// BindPFlags binds a full flag set to the configuration, using each flag's long
+// name as the config key.
+func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) }
+func (v *Viper) BindPFlags(flags *pflag.FlagSet) error {
+	return v.BindFlagValues(pflagValueSet{flags})
+}
+
+// BindPFlag binds a specific key to a pflag (as used by cobra).
+// Example (where serverCmd is a Cobra instance):
+//
+//	 serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+//	 Viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
+//
+func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) }
+func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error {
+	return v.BindFlagValue(key, pflagValue{flag})
+}
+
+// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long
+// name as the config key.
+func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) }
+func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
+	flags.VisitAll(func(flag FlagValue) {
+		if err = v.BindFlagValue(flag.Name(), flag); err != nil {
+			return
+		}
+	})
+	return nil
+}
+
+// BindFlagValue binds a specific key to a FlagValue.
+// Example (where serverCmd is a Cobra instance):
+//
+//	 serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
+//	 Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
+//
+func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) }
+func (v *Viper) BindFlagValue(key string, flag FlagValue) error {
+	if flag == nil {
+		return fmt.Errorf("flag for %q is nil", key)
+	}
+	v.pflags[strings.ToLower(key)] = flag
+	return nil
+}
+
+// BindEnv binds a Viper key to an ENV variable.
+// ENV variables are case sensitive.
+// If only a key is provided, it will use the env key matching the key, uppercased.
+// If the env variable name is not provided, EnvPrefix will be used when set.
+func BindEnv(input ...string) error { return v.BindEnv(input...) }
+func (v *Viper) BindEnv(input ...string) error {
+	var key, envkey string
+	if len(input) == 0 {
+		return fmt.Errorf("BindEnv missing key to bind to")
+	}
+
+	key = strings.ToLower(input[0])
+
+	if len(input) == 1 {
+		envkey = v.mergeWithEnvPrefix(key)
+	} else {
+		envkey = input[1]
+	}
+
+	v.env[key] = envkey
+
+	return nil
+}
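+
+// Illustrative sketch: with an env prefix set, a one-argument BindEnv
+// derives the variable name from the key; a second argument names it
+// explicitly.
+//
+//	v.SetEnvPrefix("myapp")
+//	v.BindEnv("port")                 // reads MYAPP_PORT
+//	v.BindEnv("secret", "APP_SECRET") // reads APP_SECRET as-is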
+
+// Given a key, find the value.
+// Viper will check in the following order:
+// override, flag, env, config file, key/value store, default.
+// Viper will check to see if an alias exists first.
+// Note: this assumes a lower-cased key given.
+func (v *Viper) find(lcaseKey string) interface{} {
+
+	var (
+		val    interface{}
+		exists bool
+		path   = strings.Split(lcaseKey, v.keyDelim)
+		nested = len(path) > 1
+	)
+
+	// compute the path through the nested maps to the nested value
+	if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" {
+		return nil
+	}
+
+	// if the requested key is an alias, then return the proper key
+	lcaseKey = v.realKey(lcaseKey)
+	path = strings.Split(lcaseKey, v.keyDelim)
+	nested = len(path) > 1
+
+	// Set() override first
+	val = v.searchMap(v.override, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.override) != "" {
+		return nil
+	}
+
+	// PFlag override next
+	flag, exists := v.pflags[lcaseKey]
+	if exists && flag.HasChanged() {
+		switch flag.ValueType() {
+		case "int", "int8", "int16", "int32", "int64":
+			return cast.ToInt(flag.ValueString())
+		case "bool":
+			return cast.ToBool(flag.ValueString())
+		case "stringSlice":
+			s := strings.TrimPrefix(flag.ValueString(), "[")
+			s = strings.TrimSuffix(s, "]")
+			res, _ := readAsCSV(s)
+			return res
+		default:
+			return flag.ValueString()
+		}
+	}
+	if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" {
+		return nil
+	}
+
+	// Env override next
+	if v.automaticEnvApplied {
+		// even if it hasn't been registered, if automaticEnv is used,
+		// check any Get request
+		if val = v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); val != "" {
+			return val
+		}
+		if nested && v.isPathShadowedInAutoEnv(path) != "" {
+			return nil
+		}
+	}
+	envkey, exists := v.env[lcaseKey]
+	if exists {
+		if val = v.getEnv(envkey); val != "" {
+			return val
+		}
+	}
+	if nested && v.isPathShadowedInFlatMap(path, v.env) != "" {
+		return nil
+	}
+
+	// Config file next
+	val = v.searchMapWithPathPrefixes(v.config, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.config) != "" {
+		return nil
+	}
+
+	// K/V store next
+	val = v.searchMap(v.kvstore, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" {
+		return nil
+	}
+
+	// Default next
+	val = v.searchMap(v.defaults, path)
+	if val != nil {
+		return val
+	}
+	if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" {
+		return nil
+	}
+
+	// last chance: if no other value is returned and a flag does exist for the value,
+	// get the flag's value even if the flag's value has not changed
+	if flag, exists := v.pflags[lcaseKey]; exists {
+		switch flag.ValueType() {
+		case "int", "int8", "int16", "int32", "int64":
+			return cast.ToInt(flag.ValueString())
+		case "bool":
+			return cast.ToBool(flag.ValueString())
+		case "stringSlice":
+			s := strings.TrimPrefix(flag.ValueString(), "[")
+			s = strings.TrimSuffix(s, "]")
+			res, _ := readAsCSV(s)
+			return res
+		default:
+			return flag.ValueString()
+		}
+	}
+	// last item, no need to check shadowing
+
+	return nil
+}
+
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
+	stringReader := strings.NewReader(val)
+	csvReader := csv.NewReader(stringReader)
+	return csvReader.Read()
+}
+
+// IsSet checks to see if the key has been set in any of the data locations.
+// IsSet is case-insensitive for a key.
+func IsSet(key string) bool { return v.IsSet(key) }
+func (v *Viper) IsSet(key string) bool {
+	lcaseKey := strings.ToLower(key)
+	val := v.find(lcaseKey)
+	return val != nil
+}
+
+// AutomaticEnv has Viper check ENV variables for all
+// keys set in config, default & flags.
+func AutomaticEnv() { v.AutomaticEnv() }
+func (v *Viper) AutomaticEnv() {
+	v.automaticEnvApplied = true
+}
+
+// SetEnvKeyReplacer sets the strings.Replacer on the viper object.
+// Useful for mapping an environment variable to a key that does
+// not match it.
+func SetEnvKeyReplacer(r *strings.Replacer) { v.SetEnvKeyReplacer(r) }
+func (v *Viper) SetEnvKeyReplacer(r *strings.Replacer) {
+	v.envKeyReplacer = r
+}
+
+// RegisterAlias provides another accessor for the same key.
+// This enables one to change a name without breaking the application.
+func RegisterAlias(alias string, key string) { v.RegisterAlias(alias, key) }
+func (v *Viper) RegisterAlias(alias string, key string) {
+	v.registerAlias(alias, strings.ToLower(key))
+}
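+
+// Illustrative sketch: after registering an alias, both names resolve to the
+// same underlying value.
+//
+//	v.Set("verbose", true)
+//	v.RegisterAlias("loud", "verbose")
+//	v.GetBool("loud") // true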
+
+func (v *Viper) registerAlias(alias string, key string) {
+	alias = strings.ToLower(alias)
+	if alias != key && alias != v.realKey(key) {
+		_, exists := v.aliases[alias]
+
+		if !exists {
+			// if we alias something that exists in one of the maps to another
+			// name, we'll never be able to get that value using the original
+			// name, so move the config value to the new realkey.
+			if val, ok := v.config[alias]; ok {
+				delete(v.config, alias)
+				v.config[key] = val
+			}
+			if val, ok := v.kvstore[alias]; ok {
+				delete(v.kvstore, alias)
+				v.kvstore[key] = val
+			}
+			if val, ok := v.defaults[alias]; ok {
+				delete(v.defaults, alias)
+				v.defaults[key] = val
+			}
+			if val, ok := v.override[alias]; ok {
+				delete(v.override, alias)
+				v.override[key] = val
+			}
+			v.aliases[alias] = key
+		}
+	} else {
+		jww.WARN.Println("Creating circular reference alias", alias, key, v.realKey(key))
+	}
+}
+
+func (v *Viper) realKey(key string) string {
+	newkey, exists := v.aliases[key]
+	if exists {
+		jww.DEBUG.Println("Alias", key, "to", newkey)
+		return v.realKey(newkey)
+	}
+	return key
+}
+
+// InConfig checks to see if the given key (or an alias) is in the config file.
+func InConfig(key string) bool { return v.InConfig(key) }
+func (v *Viper) InConfig(key string) bool {
+	// if the requested key is an alias, then return the proper key
+	key = v.realKey(key)
+
+	_, exists := v.config[key]
+	return exists
+}
+
+// SetDefault sets the default value for this key.
+// SetDefault is case-insensitive for a key.
+// The default is only used when no value is provided by the user via flag, config or ENV.
+func SetDefault(key string, value interface{}) { v.SetDefault(key, value) }
+func (v *Viper) SetDefault(key string, value interface{}) {
+	// If alias passed in, then set the proper default
+	key = v.realKey(strings.ToLower(key))
+	value = toCaseInsensitiveValue(value)
+
+	path := strings.Split(key, v.keyDelim)
+	lastKey := strings.ToLower(path[len(path)-1])
+	deepestMap := deepSearch(v.defaults, path[0:len(path)-1])
+
+	// set innermost value
+	deepestMap[lastKey] = value
+}
+
+// Set sets the value for the key in the override register.
+// Set is case-insensitive for a key.
+// Will be used instead of values obtained via
+// flags, config file, ENV, default, or key/value store.
+func Set(key string, value interface{}) { v.Set(key, value) }
+func (v *Viper) Set(key string, value interface{}) {
+	// If alias passed in, then set the proper override
+	key = v.realKey(strings.ToLower(key))
+	value = toCaseInsensitiveValue(value)
+
+	path := strings.Split(key, v.keyDelim)
+	lastKey := strings.ToLower(path[len(path)-1])
+	deepestMap := deepSearch(v.override, path[0:len(path)-1])
+
+	// set innermost value
+	deepestMap[lastKey] = value
+}
+
+// ReadInConfig will discover and load the configuration file from disk,
+// searching in one of the defined paths.
+func ReadInConfig() error { return v.ReadInConfig() }
+func (v *Viper) ReadInConfig() error {
+	jww.INFO.Println("Attempting to read in config file")
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+
+	if !stringInSlice(v.getConfigType(), SupportedExts) {
+		return UnsupportedConfigError(v.getConfigType())
+	}
+
+	file, err := afero.ReadFile(v.fs, filename)
+	if err != nil {
+		return err
+	}
+
+	config := make(map[string]interface{})
+
+	err = v.unmarshalReader(bytes.NewReader(file), config)
+	if err != nil {
+		return err
+	}
+
+	v.config = config
+	return nil
+}
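+
+// Illustrative sketch (hypothetical path): look for "config.<ext>" in
+// /etc/myapp for every supported extension and load the first match.
+//
+//	v.SetConfigName("config")
+//	v.AddConfigPath("/etc/myapp")
+//	if err := v.ReadInConfig(); err != nil {
+//		log.Fatal(err)
+//	}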
+
+// MergeInConfig merges a new configuration with an existing config.
+func MergeInConfig() error { return v.MergeInConfig() }
+func (v *Viper) MergeInConfig() error {
+	jww.INFO.Println("Attempting to merge in config file")
+	filename, err := v.getConfigFile()
+	if err != nil {
+		return err
+	}
+
+	if !stringInSlice(v.getConfigType(), SupportedExts) {
+		return UnsupportedConfigError(v.getConfigType())
+	}
+
+	file, err := afero.ReadFile(v.fs, filename)
+	if err != nil {
+		return err
+	}
+
+	return v.MergeConfig(bytes.NewReader(file))
+}
+
+// ReadConfig will read a configuration from the given io.Reader, setting
+// existing keys to nil if they do not exist in the new configuration.
+func ReadConfig(in io.Reader) error { return v.ReadConfig(in) }
+func (v *Viper) ReadConfig(in io.Reader) error {
+	v.config = make(map[string]interface{})
+	return v.unmarshalReader(in, v.config)
+}
+
+// MergeConfig merges a new configuration with an existing config.
+func MergeConfig(in io.Reader) error { return v.MergeConfig(in) }
+func (v *Viper) MergeConfig(in io.Reader) error {
+	if v.config == nil {
+		v.config = make(map[string]interface{})
+	}
+	cfg := make(map[string]interface{})
+	if err := v.unmarshalReader(in, cfg); err != nil {
+		return err
+	}
+	mergeMaps(cfg, v.config, nil)
+	return nil
+}
+
+func keyExists(k string, m map[string]interface{}) string {
+	lk := strings.ToLower(k)
+	for mk := range m {
+		lmk := strings.ToLower(mk)
+		if lmk == lk {
+			return mk
+		}
+	}
+	return ""
+}
+
+func castToMapStringInterface(
+	src map[interface{}]interface{}) map[string]interface{} {
+	tgt := map[string]interface{}{}
+	for k, v := range src {
+		tgt[fmt.Sprintf("%v", k)] = v
+	}
+	return tgt
+}
+
+func castMapStringToMapInterface(src map[string]string) map[string]interface{} {
+	tgt := map[string]interface{}{}
+	for k, v := range src {
+		tgt[k] = v
+	}
+	return tgt
+}
+
+func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} {
+	tgt := map[string]interface{}{}
+	for k, v := range src {
+		tgt[k] = v
+	}
+	return tgt
+}
+
+// mergeMaps merges two maps. The `itgt` parameter is for handling go-yaml's
+// insistence on parsing nested structures as `map[interface{}]interface{}`
+// instead of using a `string` as the key for nested structures beyond one level
+// deep. Both map types are supported as there is a go-yaml fork that uses
+// `map[string]interface{}` instead.
+func mergeMaps(
+	src, tgt map[string]interface{}, itgt map[interface{}]interface{}) {
+	for sk, sv := range src {
+		tk := keyExists(sk, tgt)
+		if tk == "" {
+			jww.TRACE.Printf("tk=\"\", tgt[%s]=%v", sk, sv)
+			tgt[sk] = sv
+			if itgt != nil {
+				itgt[sk] = sv
+			}
+			continue
+		}
+
+		tv, ok := tgt[tk]
+		if !ok {
+			jww.TRACE.Printf("tgt[%s] != ok, tgt[%s]=%v", tk, sk, sv)
+			tgt[sk] = sv
+			if itgt != nil {
+				itgt[sk] = sv
+			}
+			continue
+		}
+
+		svType := reflect.TypeOf(sv)
+		tvType := reflect.TypeOf(tv)
+		if svType != tvType {
+			jww.ERROR.Printf(
+				"svType != tvType; key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+				sk, svType, tvType, sv, tv)
+			continue
+		}
+
+		jww.TRACE.Printf("processing key=%s, st=%v, tt=%v, sv=%v, tv=%v",
+			sk, svType, tvType, sv, tv)
+
+		switch ttv := tv.(type) {
+		case map[interface{}]interface{}:
+			jww.TRACE.Printf("merging maps (must convert)")
+			tsv := sv.(map[interface{}]interface{})
+			ssv := castToMapStringInterface(tsv)
+			stv := castToMapStringInterface(ttv)
+			mergeMaps(ssv, stv, ttv)
+		case map[string]interface{}:
+			jww.TRACE.Printf("merging maps")
+			mergeMaps(sv.(map[string]interface{}), ttv, nil)
+		default:
+			jww.TRACE.Printf("setting value")
+			tgt[tk] = sv
+			if itgt != nil {
+				itgt[tk] = sv
+			}
+		}
+	}
+}
+
+// ReadRemoteConfig attempts to get configuration from a remote source
+// and read it in the remote configuration registry.
+func ReadRemoteConfig() error { return v.ReadRemoteConfig() }
+func (v *Viper) ReadRemoteConfig() error {
+	return v.getKeyValueConfig()
+}
+
+func WatchRemoteConfig() error { return v.WatchRemoteConfig() }
+func (v *Viper) WatchRemoteConfig() error {
+	return v.watchKeyValueConfig()
+}
+
+func (v *Viper) WatchRemoteConfigOnChannel() error {
+	return v.watchKeyValueConfigOnChannel()
+}
+
+// unmarshalReader unmarshals the contents of a Reader into the given map.
+func unmarshalReader(in io.Reader, c map[string]interface{}) error {
+	return v.unmarshalReader(in, c)
+}
+
+func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error {
+	return unmarshallConfigReader(in, c, v.getConfigType())
+}
+
+func (v *Viper) insensitiviseMaps() {
+	insensitiviseMap(v.config)
+	insensitiviseMap(v.defaults)
+	insensitiviseMap(v.override)
+	insensitiviseMap(v.kvstore)
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) getKeyValueConfig() error {
+	if RemoteConfig == nil {
+		return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'")
+	}
+
+	for _, rp := range v.remoteProviders {
+		val, err := v.getRemoteConfig(rp)
+		if err != nil {
+			continue
+		}
+		v.kvstore = val
+		return nil
+	}
+	return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+	reader, err := RemoteConfig.Get(provider)
+	if err != nil {
+		return nil, err
+	}
+	err = v.unmarshalReader(reader, v.kvstore)
+	return v.kvstore, err
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfigOnChannel() error {
+	for _, rp := range v.remoteProviders {
+		respc, _ := RemoteConfig.WatchChannel(rp)
+		// TODO: add quit channel
+		go func(rc <-chan *RemoteResponse) {
+			for {
+				b := <-rc
+				reader := bytes.NewReader(b.Value)
+				v.unmarshalReader(reader, v.kvstore)
+			}
+		}(respc)
+		return nil
+	}
+	return RemoteConfigError("No Files Found")
+}
+
+// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfig() error {
+	for _, rp := range v.remoteProviders {
+		val, err := v.watchRemoteConfig(rp)
+		if err != nil {
+			continue
+		}
+		v.kvstore = val
+		return nil
+	}
+	return RemoteConfigError("No Files Found")
+}
+
+func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) {
+	reader, err := RemoteConfig.Watch(provider)
+	if err != nil {
+		return nil, err
+	}
+	err = v.unmarshalReader(reader, v.kvstore)
+	return v.kvstore, err
+}
+
+// AllKeys returns all keys holding a value, regardless of where they are set.
+// Nested keys are returned with a v.keyDelim (= ".") separator
+func AllKeys() []string { return v.AllKeys() }
+func (v *Viper) AllKeys() []string {
+	m := map[string]bool{}
+	// add all paths, by order of descending priority to ensure correct shadowing
+	m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "")
+	m = v.flattenAndMergeMap(m, v.override, "")
+	m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags))
+	m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env))
+	m = v.flattenAndMergeMap(m, v.config, "")
+	m = v.flattenAndMergeMap(m, v.kvstore, "")
+	m = v.flattenAndMergeMap(m, v.defaults, "")
+
+	// convert set of paths to list
+	a := []string{}
+	for x := range m {
+		a = append(a, x)
+	}
+	return a
+}
+
+// flattenAndMergeMap recursively flattens the given map into a map[string]bool
+// of key paths (used as a set, easier to manipulate than a []string):
+// - each path is merged into a single key string, delimited with v.keyDelim (= ".")
+// - if a path is shadowed by an earlier value in the initial shadow map,
+//   it is skipped.
+// The resulting set of paths is merged to the given shadow set at the same time.
+func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool {
+	if shadow != nil && prefix != "" && shadow[prefix] {
+		// prefix is shadowed => nothing more to flatten
+		return shadow
+	}
+	if shadow == nil {
+		shadow = make(map[string]bool)
+	}
+
+	var m2 map[string]interface{}
+	if prefix != "" {
+		prefix += v.keyDelim
+	}
+	for k, val := range m {
+		fullKey := prefix + k
+		switch val.(type) {
+		case map[string]interface{}:
+			m2 = val.(map[string]interface{})
+		case map[interface{}]interface{}:
+			m2 = cast.ToStringMap(val)
+		default:
+			// immediate value
+			shadow[strings.ToLower(fullKey)] = true
+			continue
+		}
+		// recursively merge to shadow map
+		shadow = v.flattenAndMergeMap(shadow, m2, fullKey)
+	}
+	return shadow
+}
+
+// mergeFlatMap merges the given maps, excluding values of the second map
+// shadowed by values from the first map.
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool {
+	// scan keys
+outer:
+	for k := range m {
+		path := strings.Split(k, v.keyDelim)
+		// scan intermediate paths
+		var parentKey string
+		for i := 1; i < len(path); i++ {
+			parentKey = strings.Join(path[0:i], v.keyDelim)
+			if shadow[parentKey] {
+				// path is shadowed, continue
+				continue outer
+			}
+		}
+		// add key
+		shadow[strings.ToLower(k)] = true
+	}
+	return shadow
+}
+
+// AllSettings merges all settings and returns them as a map[string]interface{}.
+func AllSettings() map[string]interface{} { return v.AllSettings() }
+func (v *Viper) AllSettings() map[string]interface{} {
+	m := map[string]interface{}{}
+	// start from the list of keys, and construct the map one value at a time
+	for _, k := range v.AllKeys() {
+		value := v.Get(k)
+		if value == nil {
+			// should not happen, since AllKeys() returns only keys holding a value,
+			// check just in case anything changes
+			continue
+		}
+		path := strings.Split(k, v.keyDelim)
+		lastKey := strings.ToLower(path[len(path)-1])
+		deepestMap := deepSearch(m, path[0:len(path)-1])
+		// set innermost value
+		deepestMap[lastKey] = value
+	}
+	return m
+}
+
+// SetFs sets the filesystem to use to read configuration.
+func SetFs(fs afero.Fs) { v.SetFs(fs) }
+func (v *Viper) SetFs(fs afero.Fs) {
+	v.fs = fs
+}
+
+// SetConfigName sets name for the config file.
+// Does not include extension.
+func SetConfigName(in string) { v.SetConfigName(in) }
+func (v *Viper) SetConfigName(in string) {
+	if in != "" {
+		v.configName = in
+		v.configFile = ""
+	}
+}
+
+// SetConfigType sets the type of the configuration returned by the
+// remote source, e.g. "json".
+func SetConfigType(in string) { v.SetConfigType(in) }
+func (v *Viper) SetConfigType(in string) {
+	if in != "" {
+		v.configType = in
+	}
+}
+
+func (v *Viper) getConfigType() string {
+	if v.configType != "" {
+		return v.configType
+	}
+
+	cf, err := v.getConfigFile()
+	if err != nil {
+		return ""
+	}
+
+	ext := filepath.Ext(cf)
+
+	if len(ext) > 1 {
+		return ext[1:]
+	}
+
+	return ""
+}
+
+func (v *Viper) getConfigFile() (string, error) {
+	// if explicitly set, then use it
+	if v.configFile != "" {
+		return v.configFile, nil
+	}
+
+	cf, err := v.findConfigFile()
+	if err != nil {
+		return "", err
+	}
+
+	v.configFile = cf
+	return v.getConfigFile()
+}
+
+func (v *Viper) searchInPath(in string) (filename string) {
+	jww.DEBUG.Println("Searching for config in ", in)
+	for _, ext := range SupportedExts {
+		jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext))
+		if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b {
+			jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext))
+			return filepath.Join(in, v.configName+"."+ext)
+		}
+	}
+
+	return ""
+}
+
+// Search all configPaths for any config file.
+// Returns the first path that exists (and is a config file).
+func (v *Viper) findConfigFile() (string, error) {
+	jww.INFO.Println("Searching for config in ", v.configPaths)
+
+	for _, cp := range v.configPaths {
+		file := v.searchInPath(cp)
+		if file != "" {
+			return file, nil
+		}
+	}
+	return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)}
+}
+
+// Debug prints all configuration registries for debugging
+// purposes.
+func Debug() { v.Debug() }
+func (v *Viper) Debug() {
+	fmt.Printf("Aliases:\n%#v\n", v.aliases)
+	fmt.Printf("Override:\n%#v\n", v.override)
+	fmt.Printf("PFlags:\n%#v\n", v.pflags)
+	fmt.Printf("Env:\n%#v\n", v.env)
+	fmt.Printf("Key/Value Store:\n%#v\n", v.kvstore)
+	fmt.Printf("Config:\n%#v\n", v.config)
+	fmt.Printf("Defaults:\n%#v\n", v.defaults)
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 269b9c5ffb2140916e6b572187637ad55514db1f..9349165d5a63dd8aa155d4d90a3d55b1c2b8ae66 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -204,6 +204,12 @@
 			"revision": "d1e51a4af19092715f4ce7d8257fe5bc8f8be727",
 			"revisionTime": "2015-09-09T17:09:13Z"
 		},
+		{
+			"checksumSHA1": "x2Km0Qy3WgJJnV19Zv25VwTJcBM=",
+			"path": "github.com/fsnotify/fsnotify",
+			"revision": "4da3e2cfbabc9f751898f250b49f2439785783a1",
+			"revisionTime": "2017-03-29T04:21:07Z"
+		},
 		{
 			"checksumSHA1": "aZgc99rAVaEA9gYf6D4n1iF8oHs=",
 			"path": "github.com/gogo/protobuf/jsonpb",
@@ -270,18 +276,90 @@
 			"revision": "a45d81b686e85d01f2838439deaf72126ccd5a96",
 			"revisionTime": "2015-06-17T15:18:51Z"
 		},
+		{
+			"checksumSHA1": "HtpYAWHvd9mq+mHkpo7z8PGzMik=",
+			"path": "github.com/hashicorp/hcl",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "XQmjDva9JCGGkIecOgwtBEMCJhU=",
+			"path": "github.com/hashicorp/hcl/hcl/ast",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "/15SVLnCDzxICSatuYbfctrcpSM=",
+			"path": "github.com/hashicorp/hcl/hcl/parser",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "PYDzRc61T0pbwWuLNHgBRp/gJII=",
+			"path": "github.com/hashicorp/hcl/hcl/scanner",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "oS3SCN9Wd6D8/LG0Yx1fu84a7gI=",
+			"path": "github.com/hashicorp/hcl/hcl/strconv",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=",
+			"path": "github.com/hashicorp/hcl/hcl/token",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "PwlfXt7mFS8UYzWxOK5DOq0yxS0=",
+			"path": "github.com/hashicorp/hcl/json/parser",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "afrZ8VmAwfTdDAYVgNSXbxa4GsA=",
+			"path": "github.com/hashicorp/hcl/json/scanner",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=",
+			"path": "github.com/hashicorp/hcl/json/token",
+			"revision": "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8",
+			"revisionTime": "2017-10-17T18:19:29Z"
+		},
+		{
+			"checksumSHA1": "8ae1DyNE/yY9NvY3PmvtQdLBJnc=",
+			"path": "github.com/magiconair/properties",
+			"revision": "49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934",
+			"revisionTime": "2017-10-31T21:05:36Z"
+		},
 		{
 			"checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=",
 			"path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
 			"revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c",
 			"revisionTime": "2016-04-24T11:30:07Z"
 		},
+		{
+			"checksumSHA1": "gILp4IL+xwXLH6tJtRLrnZ56F24=",
+			"path": "github.com/mitchellh/mapstructure",
+			"revision": "06020f85339e21b2478f756a78e295255ffa4d6a",
+			"revisionTime": "2017-10-17T17:18:08Z"
+		},
 		{
 			"checksumSHA1": "i5DYjDaR52hXzynKeIW6RZY/4yI=",
 			"path": "github.com/paulmach/go.geojson",
 			"revision": "40612a87147b9cf5cd0e397ced54d89e623647ff",
 			"revisionTime": "2017-03-27T17:05:36Z"
 		},
+		{
+			"checksumSHA1": "pQwCl21+SANhotaqy5iEdqOnQiY=",
+			"path": "github.com/pelletier/go-toml",
+			"revision": "4e9e0ee19b60b13eb79915933f44d8ed5f268bdd",
+			"revisionTime": "2017-10-24T21:10:38Z"
+		},
 		{
 			"checksumSHA1": "Hky3u+8Rqum+wB5BHMj0A8ZmT4g=",
 			"path": "github.com/pkg/errors",
@@ -342,18 +420,48 @@
 			"revision": "e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2",
 			"revisionTime": "2017-07-03T10:12:42Z"
 		},
+		{
+			"checksumSHA1": "dW6L6oTOv4XfIahhwNzxb2Qu9to=",
+			"path": "github.com/spf13/afero",
+			"revision": "8d919cbe7e2627e417f3e45c3c0e489a5b7e2536",
+			"revisionTime": "2017-11-12T16:05:09Z"
+		},
+		{
+			"checksumSHA1": "ukvZdZw51B3QlWiWFmq8cLXIxQI=",
+			"path": "github.com/spf13/afero/mem",
+			"revision": "8d919cbe7e2627e417f3e45c3c0e489a5b7e2536",
+			"revisionTime": "2017-11-12T16:05:09Z"
+		},
+		{
+			"checksumSHA1": "Sq0QP4JywTr7UM4hTK1cjCi7jec=",
+			"path": "github.com/spf13/cast",
+			"revision": "acbeb36b902d72a7a4c18e8f3241075e7ab763e4",
+			"revisionTime": "2017-04-13T08:50:28Z"
+		},
 		{
 			"checksumSHA1": "Egby8wsHgh+rW7DphTy/b1dN2cs=",
 			"path": "github.com/spf13/cobra",
 			"revision": "b3426bbac13d7110c4d8fce456ea0012f79f3b8b",
 			"revisionTime": "2017-10-29T21:09:45Z"
 		},
+		{
+			"checksumSHA1": "suLj1G8Vd//a/a3sUEKz/ROalz0=",
+			"path": "github.com/spf13/jwalterweatherman",
+			"revision": "12bd96e66386c1960ab0f74ced1362f66f552f7b",
+			"revisionTime": "2017-09-01T15:06:07Z"
+		},
 		{
 			"checksumSHA1": "fKq6NiaqP3DFxnCRF5mmpJWTSUA=",
 			"path": "github.com/spf13/pflag",
 			"revision": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea",
 			"revisionTime": "2017-11-06T14:28:49Z"
 		},
+		{
+			"checksumSHA1": "zjfAVm+q8p9EbxPzjaZxGoJUc4M=",
+			"path": "github.com/spf13/viper",
+			"revision": "4dddf7c62e16bce5807744018f5b753bfe21bbd2",
+			"revisionTime": "2017-11-09T20:57:16Z"
+		},
 		{
 			"checksumSHA1": "Q2V7Zs3diLmLfmfbiuLpSxETSuY=",
 			"path": "github.com/stretchr/testify/assert",
diff --git a/wiki/content/deploy/index.md b/wiki/content/deploy/index.md
index ea4e0caa03097e579a50d25af572c12b2c846019..9ece82bbb3e06963a417638d9b420ec1664b3c6b 100644
--- a/wiki/content/deploy/index.md
+++ b/wiki/content/deploy/index.md
@@ -747,48 +747,49 @@ to see useful information, like the following:
 * `/removeNode?id=idx&group=gid` Used to remove dead node from the quorum, takes node id and group id as query param.
 
 ## Config
-{{% notice "note" %}}
-Currently only valid for Dgraph server.
-{{% /notice %}}
-
-The command-line flags can be stored in a YAML file and provided via the `--config` flag.  For example:
 
-```sh
-# Folder in which to store exports.
-export: export
+The full set of dgraph's configuration options (along with brief descriptions)
+can be viewed by invoking dgraph with the `--help` flag. For example, to see
+the options for `dgraph server`, run `dgraph server --help`.
 
-# Fraction of dirty posting lists to commit every few seconds.
-gentlecommit: 0.33
+The options can be configured in multiple ways (from highest precedence to
+lowest precedence):
 
-# RAFT ID that this server will use to join RAFT groups.
-idx: 1
+- Using command line flags (as described in the help output).
 
-# Port to run server on. (default 8080)
-port: 8080
+- Using environment variables.
 
-# GRPC port to run server on. (default 9080)
-grpc_port: 9080
+- Using a configuration file.
 
-# Port used by worker for internal communication.
-workerport: 12345
+If no configuration for an option is used, then the default value as described
+in the `--help` output applies.
 
-# Estimated memory the process can take. Actual usage would be slightly more
-memory_mb: 4096
+Multiple configuration methods can be used at the same time, e.g. a core set
+of options could be set in a config file, while instance-specific options are
+set using environment variables or flags, as shown in the example below.
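+
+For example (illustrative only; `shared_config.yaml` is a hypothetical file
+holding the shared options), one instance-specific option is supplied via an
+environment variable while the rest come from a config file:
+
+```sh
+DGRAPH_SERVER_MEMORY_MB=8096 dgraph server --config shared_config.yaml
+```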
 
-# The ratio of queries to trace.
-trace: 0.33
+The environment variable names mirror the flag names as seen in the `--help`
+output. They are the concatenation of `DGRAPH`, the subcommand invoked
+(`SERVER`, `ZERO`, `LIVE`, or `BULK`), and then the name of the flag (in
+uppercase). For example, instead of using `dgraph server --memory_mb=8096`, you
+could use `DGRAPH_SERVER_MEMORY_MB=8096 dgraph server`.
 
-# Directory to store posting lists.
-p: p
+Configuration file formats supported are JSON, TOML, YAML, HCL, and Java
+properties (detected via file extension).
 
-# Directory to store raft write-ahead logs.
-w: w
+A configuration file can be specified using the `--config` flag, or an
+environment variable. E.g. `dgraph zero --config my_config.json` or
+`DGRAPH_ZERO_CONFIG=my_config.json dgraph zero`.
 
-# Debug mode for testing.
-debugmode: false
+The config file structure is just simple key/value pairs (mirroring the flag
+names). E.g. a JSON config file that sets `--idx`, `--peer`, and `--replicas`:
 
-# Address of dgraphzero
-peer: localhost:8888
+```json
+{
+  "idx": 42,
+  "peer": 192.168.0.55:9080,
+  "replicas": 2
+}
 ```
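+
+The same settings in an equivalent YAML config file (the format is detected
+from the file extension):
+
+```yaml
+idx: 42
+peer: "192.168.0.55:9080"
+replicas: 2
+```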
 
 ## TLS configuration
@@ -801,65 +802,65 @@ Following configuration options are available for the server:
 
 ```sh
 # Use TLS connections with clients.
-tls.on
+tls_on
 
 # CA Certs file path.
-tls.ca_certs string
+tls_ca_certs string
 
 # Include System CA into CA Certs.
-tls.use_system_ca
+tls_use_system_ca
 
 # Certificate file path.
-tls.cert string
+tls_cert string
 
 # Certificate key file path.
-tls.cert_key string
+tls_cert_key string
 
 # Certificate key passphrase.
-tls.cert_key_passphrase string
+tls_cert_key_passphrase string
 
 # Enable TLS client authentication
-tls.client_auth string
+tls_client_auth string
 
 # TLS max version. (default "TLS12")
-tls.max_version string
+tls_max_version string
 
 # TLS min version. (default "TLS11")
-tls.min_version string
+tls_min_version string
 ```
 
 Dgraph loader can be configured with following options:
 
 ```sh
 # Use TLS connections.
-tls.on
+tls_on
 
 # CA Certs file path.
-tls.ca_certs string
+tls_ca_certs string
 
 # Include System CA into CA Certs.
-tls.use_system_ca
+tls_use_system_ca
 
 # Certificate file path.
-tls.cert string
+tls_cert string
 
 # Certificate key file path.
-tls.cert_key string
+tls_cert_key string
 
 # Certificate key passphrase.
-tls.cert_key_passphrase string
+tls_cert_key_passphrase string
 
 # Server name.
-tls.server_name string
+tls_server_name string
 
 # Skip certificate validation (insecure)
-tls.insecure
+tls_insecure
 
 # TLS max version. (default "TLS12")
-tls.max_version string
+tls_max_version string
 
 # TLS min version. (default "TLS11")
-tls.min_version string
+tls_min_version string
 ```
 
 
diff --git a/x/config.go b/x/config.go
index fad1d40b4b679cbdf0c4e059644851449ab9170e..eb8c578923e4000d8f224c7beef018eb6cd1a04c 100644
--- a/x/config.go
+++ b/x/config.go
@@ -16,8 +16,6 @@
 package x
 
 type Options struct {
-	ConfigFile string
-	Version    bool
 	DebugMode  bool
 	PortOffset int
 }
diff --git a/x/init.go b/x/init.go
index 3e8a4c77022823cc8b6aabf5de206730d8d3cc7f..986cf97815fe541f4319639aff31ca4a9217e338 100644
--- a/x/init.go
+++ b/x/init.go
@@ -17,12 +17,8 @@
 package x
 
 import (
-	"flag"
 	"fmt"
-	"io/ioutil"
 	"os"
-
-	yaml "gopkg.in/yaml.v2"
 )
 
 var (
@@ -60,21 +56,6 @@ func Init(debug bool) {
 	}
 }
 
-// loadConfigFromYAML reads configurations from specified YAML file.
-func LoadConfigFromYAML(file string) {
-	bs, err := ioutil.ReadFile(file)
-	Checkf(err, "Cannot open specified config file: %v", file)
-
-	m := make(map[string]string)
-	Checkf(yaml.Unmarshal(bs, &m), "Error while parsing config file: %v", Config.ConfigFile)
-
-	for k, v := range m {
-		Printf("Picked flag from config: [%q = %v]\n", k, v)
-		err := flag.Set(k, v)
-		Checkf(err, "While setting flag from config.")
-	}
-}
-
 func BuildDetails() string {
 	return fmt.Sprintf(`
 Dgraph version   : %v
diff --git a/x/profile.go b/x/profile.go
new file mode 100644
index 0000000000000000000000000000000000000000..52b274a81e551e58afaa8a2ed8c4b182e92b9d87
--- /dev/null
+++ b/x/profile.go
@@ -0,0 +1,41 @@
+package x
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+
+	"github.com/pkg/profile"
+	"github.com/spf13/viper"
+)
+
+type stopper interface {
+	Stop()
+}
+
+func StartProfile(conf *viper.Viper) stopper {
+	profileMode := conf.GetString("profile_mode")
+	switch profileMode {
+	case "cpu":
+		return profile.Start(profile.CPUProfile)
+	case "mem":
+		return profile.Start(profile.MemProfile)
+	case "mutex":
+		return profile.Start(profile.MutexProfile)
+	case "block":
+		blockRate := conf.GetInt("block_rate")
+		runtime.SetBlockProfileRate(blockRate)
+		return profile.Start(profile.BlockProfile)
+	case "":
+		// do nothing
+		return noOpStopper{}
+	default:
+		fmt.Printf("Invalid profile mode: %q\n", profileMode)
+		os.Exit(1)
+		return noOpStopper{}
+	}
+}
+
+type noOpStopper struct{}
+
+func (noOpStopper) Stop() {}
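+
+// Illustrative usage sketch (assumed caller code, not part of this file):
+// start profiling from the subcommand's viper config and stop it when the
+// command returns.
+//
+//	func run(conf *viper.Viper) {
+//		defer x.StartProfile(conf).Stop()
+//		// ... run the command ...
+//	}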
diff --git a/x/subcommand.go b/x/subcommand.go
new file mode 100644
index 0000000000000000000000000000000000000000..81a468757ed98c84eb249661c5404d5f84ab50ec
--- /dev/null
+++ b/x/subcommand.go
@@ -0,0 +1,13 @@
+package x
+
+import (
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
+
+type SubCommand struct {
+	Cmd  *cobra.Command
+	Conf *viper.Viper
+
+	EnvPrefix string
+}
diff --git a/x/tls_helper.go b/x/tls_helper.go
index 8482d709641b0ccaef0e2d2e8af6a12ed08eb877..e692ad122c7d06926b4682a82ca1080b1c622d53 100644
--- a/x/tls_helper.go
+++ b/x/tls_helper.go
@@ -27,6 +27,7 @@ import (
 	"time"
 
 	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
 )
 
 type tlsConfigType int8
@@ -54,14 +55,25 @@ type TLSHelperConfig struct {
 	MaxVersion             string
 }
 
-func SetTLSFlags(conf *TLSHelperConfig, flag *pflag.FlagSet) {
-	flag.BoolVar(&conf.CertRequired, "tls.on", false, "Use TLS connections with clients.")
-	flag.StringVar(&conf.Cert, "tls.cert", "", "Certificate file path.")
-	flag.StringVar(&conf.Key, "tls.cert_key", "", "Certificate key file path.")
-	flag.StringVar(&conf.KeyPassphrase, "tls.cert_key_passphrase", "", "Certificate key passphrase.")
-	flag.BoolVar(&conf.UseSystemClientCACerts, "tls.use_system_ca", false, "Include System CA into CA Certs.")
-	flag.StringVar(&conf.MinVersion, "tls.min_version", "TLS11", "TLS min version.")
-	flag.StringVar(&conf.MaxVersion, "tls.max_version", "TLS12", "TLS max version.")
+func RegisterTLSFlags(flag *pflag.FlagSet) {
+	// TODO: Why is the naming of the flags inconsistent here?
+	flag.Bool("tls_on", false, "Use TLS connections with clients.")
+	flag.String("tls_cert", "", "Certificate file path.")
+	flag.String("tls_cert_key", "", "Certificate key file path.")
+	flag.String("tls_cert_key_passphrase", "", "Certificate key passphrase.")
+	flag.Bool("tls_use_system_ca", false, "Include System CA into CA Certs.")
+	flag.String("tls_min_version", "TLS11", "TLS min version.")
+	flag.String("tls_max_version", "TLS12", "TLS max version.")
+}
+
+func LoadTLSConfig(conf *TLSHelperConfig, v *viper.Viper) {
+	conf.CertRequired = v.GetBool("tls_on")
+	conf.Cert = v.GetString("tls_cert")
+	conf.Key = v.GetString("tls_cert_key")
+	conf.KeyPassphrase = v.GetString("tls_cert_key_passphrase")
+	conf.UseSystemClientCACerts = v.GetBool("tls_use_system_ca")
+	conf.MinVersion = v.GetString("tls_min_version")
+	conf.MaxVersion = v.GetString("tls_max_version")
 }
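+
+// Illustrative usage sketch (assumed caller code): register the flags on a
+// command's flag set, then copy the parsed values into a TLSHelperConfig.
+//
+//	x.RegisterTLSFlags(cmd.Flags())
+//	// ... parse flags, bind them to a viper instance v ...
+//	var tlsConf x.TLSHelperConfig
+//	x.LoadTLSConfig(&tlsConf, v)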
 
 func generateCertPool(certPath string, useSystemCA bool) (*x509.CertPool, error) {