Catch back up to master again
diff --git a/action/create.go b/action/create.go
index 5d42199..e44e2fe 100644
--- a/action/create.go
+++ b/action/create.go
@@ -85,7 +85,7 @@
 	config := new(cfg.Config)
 
 	// Get the name of the top level package
-	config.Name = name
+	config.ProjectRoot = name
 
 	// Import by looking at other package managers and looking over the
 	// entire directory structure.
@@ -131,7 +131,7 @@
 		n := strings.TrimPrefix(pa, vpath)
 		root, subpkg := util.NormalizeName(n)
 
-		if !config.Imports.Has(root) && root != config.Name {
+		if !config.Imports.Has(root) && root != config.ProjectRoot {
 			msg.Info("--> Found reference to %s\n", n)
 			d := &cfg.Dependency{
 				Name: root,
@@ -156,9 +156,9 @@
 		n := strings.TrimPrefix(pa, vpath)
 		root, subpkg := util.NormalizeName(n)
 
-		if config.Imports.Has(root) && root != config.Name {
+		if config.Imports.Has(root) && root != config.ProjectRoot {
 			msg.Debug("--> Found test reference to %s already listed as an import", n)
-		} else if !config.DevImports.Has(root) && root != config.Name {
+		} else if !config.DevImports.Has(root) && root != config.ProjectRoot {
 			msg.Info("--> Found test reference to %s", n)
 			d := &cfg.Dependency{
 				Name: root,
diff --git a/action/ensure.go b/action/ensure.go
index 2f5f5af..43f2df9 100644
--- a/action/ensure.go
+++ b/action/ensure.go
@@ -48,8 +48,8 @@
 		b, err = filepath.Rel(b, cwd)
 		if err == nil {
 			name := buildContext.PackageName(b)
-			if name != conf.Name {
-				msg.Warn("The name listed in the config file (%s) does not match the current location (%s)", conf.Name, name)
+			if name != conf.ProjectRoot {
+				msg.Warn("The name listed in the config file (%s) does not match the current location (%s)", conf.ProjectRoot, name)
 			}
 		} else {
 			msg.Warn("Problem finding the config file path (%s) relative to the current directory (%s): %s", b, cwd, err)
diff --git a/action/get.go b/action/get.go
index 009d90d..8b29cdb 100644
--- a/action/get.go
+++ b/action/get.go
@@ -2,111 +2,112 @@
 
 import (
 	"fmt"
+	"log"
+	"os"
 	"path/filepath"
 	"strings"
 
 	"github.com/Masterminds/glide/cache"
 	"github.com/Masterminds/glide/cfg"
-	"github.com/Masterminds/glide/godep"
+	"github.com/Masterminds/glide/dependency"
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/repo"
 	"github.com/Masterminds/glide/util"
 	"github.com/Masterminds/semver"
+	"github.com/sdboyer/gps"
 )
 
 // Get fetches one or more dependencies and installs.
 //
-// This includes resolving dependency resolution and re-generating the lock file.
-func Get(names []string, installer *repo.Installer, insecure, skipRecursive, strip, stripVendor, nonInteract, testDeps bool) {
-	if installer.UseCache {
-		cache.SystemLock()
-	}
-
+// This includes a solver run and re-generating the lock file.
+func Get(names []string, installer *repo.Installer, stripVendor, nonInteract bool) {
 	base := gpath.Basepath()
 	EnsureGopath()
 	EnsureVendorDir()
 	conf := EnsureConfig()
+
 	glidefile, err := gpath.Glide()
 	if err != nil {
 		msg.Die("Could not find Glide file: %s", err)
 	}
 
+	vend, err := gpath.Vendor()
+	if err != nil {
+		msg.Die("Could not find the vendor dir: %s", err)
+	}
+
+	params := gps.SolveParameters{
+		RootDir:     filepath.Dir(glidefile),
+		ImportRoot:  gps.ProjectRoot(conf.ProjectRoot),
+		Manifest:    conf,
+		Ignore:      conf.Ignore,
+		Trace:       true,
+		TraceLogger: log.New(os.Stdout, "", 0),
+	}
+
+	// We load the lock file early and bail out if there's a problem, because we
+	// don't want a get to just update all deps without the user explicitly
+	// making that choice.
+	if gpath.HasLock(base) {
+		params.Lock, err = loadLockfile(base, conf)
+		if err != nil {
+			msg.Err("Could not load lockfile; aborting get. Existing dependency versions cannot be safely preserved without a lock file. Error was: %s", err)
+			return
+		}
+	}
+
+	// Create the SourceManager for this run
+	sm, err := gps.NewSourceManager(dependency.Analyzer{}, filepath.Join(installer.Home, "cache"), false)
+	if err != nil {
+		msg.Err(err.Error())
+		return
+	}
+	// Release only after a successful construction; sm is nil on error.
+	defer sm.Release()
+
+	// Now, with the easy/fast errors out of the way, dive into adding the new
+	// deps to the manifest.
+
 	// Add the packages to the config.
-	if count, err2 := addPkgsToConfig(conf, names, insecure, nonInteract, testDeps); err2 != nil {
+	if count, err2 := addPkgsToConfig(conf, names, false, nonInteract, false); err2 != nil {
 		msg.Die("Failed to get new packages: %s", err2)
 	} else if count == 0 {
 		msg.Warn("Nothing to do")
 		return
 	}
 
-	// Fetch the new packages. Can't resolve versions via installer.Update if
-	// get is called while the vendor/ directory is empty so we checkout
-	// everything.
-	err = installer.Checkout(conf)
+	// Prepare a solver. This validates our params.
+	s, err := gps.Prepare(params, sm)
 	if err != nil {
-		msg.Die("Failed to checkout packages: %s", err)
+		msg.Err("Aborted get - could not set up solver to reconcile dependencies: %s", err)
+		return
 	}
 
-	// Prior to resolving dependencies we need to start working with a clone
-	// of the conf because we'll be making real changes to it.
-	confcopy := conf.Clone()
-
-	if !skipRecursive {
-		// Get all repos and update them.
-		// TODO: Can we streamline this in any way? The reason that we update all
-		// of the dependencies is that we need to re-negotiate versions. For example,
-		// if an existing dependency has the constraint >1.0 and this new package
-		// adds the constraint <2.0, then this may re-resolve the existing dependency
-		// to be between 1.0 and 2.0. But changing that dependency may then result
-		// in that dependency's dependencies changing... so we sorta do the whole
-		// thing to be safe.
-		err = installer.Update(confcopy)
-		if err != nil {
-			msg.Die("Could not update packages: %s", err)
-		}
+	r, err := s.Solve()
+	if err != nil {
+		// TODO better error handling
+		msg.Err("Failed to find a solution for all new dependencies: %s", err.Error())
+		return
 	}
 
-	// Set Reference
-	if err := repo.SetReference(confcopy, installer.ResolveTest); err != nil {
-		msg.Err("Failed to set references: %s", err)
+	// Solve succeeded. Write out the yaml, lock, and vendor to a tmpdir, then mv
+	// them all into place iff all the writes worked
+
+	gw := safeGroupWriter{
+		conf:        conf,
+		resultLock:  r,
+		sm:          sm,
+		glidefile:   glidefile,
+		vendor:      vend,
+		stripVendor: stripVendor,
+	}
+	// params.Lock is nil when no lock file existed; a bare type assertion
+	// would panic in that case.
+	if lf, ok := params.Lock.(*cfg.Lockfile); ok {
+		gw.lock = lf
+	}
 
-	// VendoredCleanup
-	// When stripping VCS happens this will happen as well. No need for double
-	// effort.
-	if installer.UpdateVendored && !strip {
-		repo.VendoredCleanup(confcopy)
-	}
-
-	// Write YAML
-	if err := conf.WriteFile(glidefile); err != nil {
-		msg.Die("Failed to write glide YAML file: %s", err)
-	}
-	if !skipRecursive {
-		// Write lock
-		if stripVendor {
-			confcopy = godep.RemoveGodepSubpackages(confcopy)
-		}
-		writeLock(conf, confcopy, base)
-	} else {
-		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
-	}
-
-	if strip {
-		msg.Info("Removing version control data from vendor directory...")
-		err := gpath.StripVcs()
-		if err != nil {
-			msg.Err("Unable to strip version control data: %s", err)
-		}
-	}
-
-	if stripVendor {
-		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
-		err := gpath.StripVendor()
-		if err != nil {
-			msg.Err("Unable to strip vendor directories: %s", err)
-		}
+	err = gw.writeAllSafe()
+	if err != nil {
+		msg.Err(err.Error())
+		return
 	}
 }
 
@@ -134,7 +135,7 @@
 // - sets up insecure repo URLs where necessary
 // - generates a list of subpackages
 func addPkgsToConfig(conf *cfg.Config, names []string, insecure, nonInteract, testDeps bool) (int, error) {
-
+	// TODO refactor this to take and use a gps.SourceManager
 	if len(names) == 1 {
 		msg.Info("Preparing to install %d package.", len(names))
 	} else {
@@ -201,7 +202,8 @@
 		}
 
 		dep := &cfg.Dependency{
-			Name: root,
+			Name:       root,
+			Constraint: gps.Any(),
 		}
 
 		// When retrieving from an insecure location set the repo to the
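
The get/install/update actions now share one pipeline: build a gps.SolveParameters, Prepare a solver, Solve, and persist the result. A minimal sketch of that pipeline in isolation, using only the gps calls that appear in this diff (the helper name solveToLockfile is hypothetical):

import (
	"github.com/Masterminds/glide/cfg"
	"github.com/Masterminds/glide/dependency"
	"github.com/sdboyer/gps"
)

// solveToLockfile condenses the Prepare -> Solve -> lock flow that get,
// install, and update each perform.
func solveToLockfile(conf *cfg.Config, rootDir, cacheDir string) (*cfg.Lockfile, error) {
	// The Analyzer teaches the SourceManager how to read glide (and legacy
	// tool) metadata out of dependency source trees.
	sm, err := gps.NewSourceManager(dependency.Analyzer{}, cacheDir, false)
	if err != nil {
		return nil, err
	}
	defer sm.Release()

	params := gps.SolveParameters{
		RootDir:    rootDir,
		ImportRoot: gps.ProjectRoot(conf.ProjectRoot),
		Manifest:   conf,
		Ignore:     conf.Ignore,
	}

	s, err := gps.Prepare(params, sm) // validates params
	if err != nil {
		return nil, err
	}

	r, err := s.Solve() // runs the constraint solver
	if err != nil {
		return nil, err
	}

	// Convert the solver's result into glide's lock format for writing.
	return cfg.LockfileFromSolverLock(r), nil
}
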
diff --git a/action/install.go b/action/install.go
index b1566a2..c10f5d3 100644
--- a/action/install.go
+++ b/action/install.go
@@ -1,87 +1,349 @@
 package action
 
 import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
 	"path/filepath"
 
-	"github.com/Masterminds/glide/cache"
 	"github.com/Masterminds/glide/cfg"
 	"github.com/Masterminds/glide/dependency"
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/repo"
+	"github.com/sdboyer/gps"
 )
 
 // Install installs a vendor directory based on an existing Glide configuration.
-func Install(installer *repo.Installer, strip, stripVendor bool) {
-	if installer.UseCache {
-		cache.SystemLock()
-	}
-
+func Install(installer *repo.Installer, installOnly, syncedOnly, stripVendor bool) {
 	base := "."
 	// Ensure GOPATH
 	EnsureGopath()
 	EnsureVendorDir()
 	conf := EnsureConfig()
 
-	// Lockfile exists
-	if !gpath.HasLock(base) {
-		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
-		Update(installer, false, strip, stripVendor)
+	// TODO might need a better way for discovering the root
+	vend, err := gpath.Vendor()
+	if err != nil {
+		msg.Die("Could not find the vendor dir: %s", err)
+	}
+
+	// Create the SourceManager for this run
+	sm, err := gps.NewSourceManager(dependency.Analyzer{}, filepath.Join(installer.Home, "cache"), false)
+	if err != nil {
+		msg.Err(err.Error())
 		return
 	}
+	// Release only after a successful construction; sm is nil on error.
+	defer sm.Release()
-	// Load lockfile
-	lock, err := cfg.ReadLockFile(filepath.Join(base, gpath.LockFile))
-	if err != nil {
-		msg.Die("Could not load lockfile.")
-	}
-	// Verify lockfile hasn't changed
-	hash, err := conf.Hash()
-	if err != nil {
-		msg.Die("Could not load lockfile.")
-	} else if hash != lock.Hash {
-		msg.Warn("Lock file may be out of date. Hash check of YAML failed. You may need to run 'update'")
+
+	params := gps.SolveParameters{
+		RootDir:     filepath.Dir(vend),
+		ImportRoot:  gps.ProjectRoot(conf.ProjectRoot),
+		Manifest:    conf,
+		Ignore:      conf.Ignore,
+		Trace:       true,
+		TraceLogger: log.New(os.Stdout, "", 0),
 	}
 
-	// Install
-	newConf, err := installer.Install(lock, conf)
-	if err != nil {
-		msg.Die("Failed to install: %s", err)
-	}
-
-	msg.Info("Setting references.")
-
-	// Set reference
-	if err := repo.SetReference(newConf, installer.ResolveTest); err != nil {
-		msg.Err("Failed to set references: %s (Skip to cleanup)", err)
-	}
-
-	// Delete unused packages
-	if installer.DeleteUnused {
-		// newConf is calculated based on the lock file so it should be
-		// accurate to the project list.
-		dependency.DeleteUnused(newConf)
-	}
-
-	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
-	// When stripping VCS happens this will happen as well. No need for double
-	// effort.
-	if installer.UpdateVendored && !strip {
-		repo.VendoredCleanup(newConf)
-	}
-
-	if strip {
-		msg.Info("Removing version control data from vendor directory...")
-		err := gpath.StripVcs()
+	var s gps.Solver
+	if gpath.HasLock(base) {
+		params.Lock, err = loadLockfile(base, conf)
 		if err != nil {
-			msg.Err("Unable to strip version control data: %s", err)
+			msg.Err("Could not load lockfile.")
+			return
+		}
+
+		s, err = gps.Prepare(params, sm)
+		if err != nil {
+			msg.Err("Could not set up solver: %s", err)
+			return
+		}
+		digest, err := s.HashInputs()
+		if err != nil {
+			msg.Err("Could not hash the solver inputs: %s", err)
+			return
+		}
+
+		// Check if the digests match, and complain if they don't
+		if !bytes.Equal(digest, params.Lock.InputHash()) {
+			if syncedOnly {
+				msg.Err("glide.yaml is out of sync with glide.lock")
+				return
+			}
+			msg.Warn("glide.yaml is out of sync with glide.lock!")
+		}
+
+		gw := safeGroupWriter{
+			resultLock:  params.Lock,
+			vendor:      vend,
+			sm:          sm,
+			stripVendor: stripVendor,
+		}
+
+		err = gw.writeAllSafe()
+		if err != nil {
+			msg.Err(err.Error())
+			return
+		}
+	} else if installOnly || syncedOnly {
+		msg.Err("No glide.lock file could be found.")
+		return
+	} else {
+		// There is no lock, so we have to solve first
+		s, err = gps.Prepare(params, sm)
+		if err != nil {
+			msg.Err("Could not set up solver: %s", err)
+			return
+		}
+
+		r, err := s.Solve()
+		if err != nil {
+			// TODO better error handling
+			msg.Err(err.Error())
+			return
+		}
+
+		gw := safeGroupWriter{
+			resultLock:  r,
+			vendor:      vend,
+			sm:          sm,
+			stripVendor: stripVendor,
+		}
+
+		err = gw.writeAllSafe()
+		if err != nil {
+			msg.Err(err.Error())
+			return
+		}
+	}
+}
+
+// locksAreEquivalent compares the fingerprints between two locks to determine
+// if they're equivalent.
+//
+// If either of the locks is nil, the input hashes are different, the
+// fingerprints are different, or any error is returned from fingerprinting,
+// this function returns false.
+func locksAreEquivalent(l1, l2 *cfg.Lockfile) bool {
+	if l1 != nil && l2 != nil {
+		if l1.Hash != l2.Hash {
+			return false
+		}
+
+		f1, err := l1.Fingerprint()
+		f2, err2 := l2.Fingerprint()
+		if err == nil && err2 == nil && f1 == f2 {
+			return true
+		}
+	}
+	return false
+}
+
+// safeGroupWriter provides a slipshod-but-better-than-nothing approach to
+// grouping together yaml, lock, and vendor dir writes.
+type safeGroupWriter struct {
+	conf              *cfg.Config
+	lock              *cfg.Lockfile
+	resultLock        gps.Lock
+	sm                gps.SourceManager
+	glidefile, vendor string
+	stripVendor       bool
+}
+
+// writeAllSafe writes out some combination of config yaml, lock, and a vendor
+// tree, to a temp dir, then moves them into place if and only if all the write
+// operations succeeded. It also does its best to roll back if any moves fail.
+//
+// This helps to ensure glide doesn't exit with a partial write, resulting in an
+// undefined disk state.
+//
+// - If a gw.conf is provided, it will be written to gw.glidefile
+// - If gw.lock is provided without a gw.resultLock, it will be written to
+//   `glide.lock` in the parent dir of gw.vendor
+// - If gw.lock and gw.resultLock are both provided and are not equivalent,
+//   the resultLock will be written to the same location as above, and a vendor
+//   tree will be written to gw.vendor
+// - If gw.resultLock is provided and gw.lock is not, it will write both a lock
+//   and vendor dir in the same way
+//
+// Any of the conf, lock, or result can be omitted; the grouped write operation
+// will continue for whichever inputs are present.
+func (gw safeGroupWriter) writeAllSafe() error {
+	// Decide which writes we need to do
+	var writeConf, writeLock, writeVendor bool
+
+	if gw.conf != nil {
+		writeConf = true
+	}
+
+	if gw.resultLock != nil {
+		if gw.lock == nil {
+			writeLock, writeVendor = true, true
+		} else {
+			rlf := cfg.LockfileFromSolverLock(gw.resultLock)
+			if !locksAreEquivalent(rlf, gw.lock) {
+				writeLock, writeVendor = true, true
+			}
+		}
+	} else if gw.lock != nil {
+		writeLock = true
+	}
+
+	if !writeConf && !writeLock && !writeVendor {
+		// nothing to do
+		return nil
+	}
+
+	if writeConf && gw.glidefile == "" {
+		return fmt.Errorf("Must provide a path if writing out a config yaml.")
+	}
+
+	if (writeLock || writeVendor) && gw.vendor == "" {
+		return fmt.Errorf("Must provide a vendor dir if writing out a lock or vendor dir.")
+	}
+
+	if writeVendor && gw.sm == nil {
+		return fmt.Errorf("Must provide a SourceManager if writing out a vendor dir.")
+	}
+
+	td, err := ioutil.TempDir(os.TempDir(), "glide")
+	if err != nil {
+		return fmt.Errorf("Error while creating temp dir for vendor directory: %s", err)
+	}
+	defer os.RemoveAll(td)
+
+	if writeConf {
+		if err := gw.conf.WriteFile(filepath.Join(td, "glide.yaml")); err != nil {
+			return fmt.Errorf("Failed to write glide YAML file: %s", err)
 		}
 	}
 
-	if stripVendor {
-		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
-		err := gpath.StripVendor()
-		if err != nil {
-			msg.Err("Unable to strip vendor directories: %s", err)
+	if writeLock {
+		if gw.resultLock == nil {
+			// the result lock is nil but the flag is on, so we must be writing
+			// the other one
+			if err := gw.lock.WriteFile(filepath.Join(td, gpath.LockFile)); err != nil {
+				return fmt.Errorf("Failed to write glide lock file: %s", err)
+			}
+		} else {
+			rlf := cfg.LockfileFromSolverLock(gw.resultLock)
+			if err := rlf.WriteFile(filepath.Join(td, gpath.LockFile)); err != nil {
+				return fmt.Errorf("Failed to write glide lock file: %s", err)
+			}
 		}
 	}
+
+	if writeVendor {
+		err = gps.CreateVendorTree(filepath.Join(td, "vendor"), gw.resultLock, gw.sm, gw.stripVendor)
+		if err != nil {
+			return fmt.Errorf("Error while generating vendor tree: %s", err)
+		}
+	}
+
+	// Move the existing files and dirs to the temp dir while we put the new
+	// ones in, to provide insurance against errors for as long as possible
+	var fail bool
+	var failerr error
+	type pathpair struct {
+		from, to string
+	}
+	var restore []pathpair
+
+	if writeConf {
+		if _, err := os.Stat(gw.glidefile); err == nil {
+			// move out the old one
+			tmploc := filepath.Join(td, "glide.yaml-old")
+			failerr = os.Rename(gw.glidefile, tmploc)
+			if failerr != nil {
+				fail = true
+			} else {
+				restore = append(restore, pathpair{from: tmploc, to: gw.glidefile})
+			}
+		}
+
+		// move in the new one
+		failerr = os.Rename(filepath.Join(td, "glide.yaml"), gw.glidefile)
+		if failerr != nil {
+			fail = true
+		}
+	}
+
+	if !fail && writeLock {
+		tgt := filepath.Join(filepath.Dir(gw.vendor), gpath.LockFile)
+		if _, err := os.Stat(tgt); err == nil {
+			// move out the old one
+			tmploc := filepath.Join(td, "glide.lock-old")
+
+			failerr = os.Rename(tgt, tmploc)
+			if failerr != nil {
+				fail = true
+			} else {
+				restore = append(restore, pathpair{from: tmploc, to: tgt})
+			}
+		}
+
+		// move in the new one
+		failerr = os.Rename(filepath.Join(td, gpath.LockFile), tgt)
+		if failerr != nil {
+			fail = true
+		}
+	}
+
+	// have to declare out here so it's present later
+	var vendorbak string
+	if !fail && writeVendor {
+		if _, err := os.Stat(gw.vendor); err == nil {
+			// move out the old vendor dir. just do it into an adjacent dir, in
+			// order to mitigate the possibility of a pointless cross-filesystem move
+			vendorbak = gw.vendor + "-old"
+			if _, err := os.Stat(vendorbak); err == nil {
+				// Just in case that happens to exist...
+				vendorbak = filepath.Join(td, "vendor-old")
+			}
+			failerr = os.Rename(gw.vendor, vendorbak)
+			if failerr != nil {
+				fail = true
+			} else {
+				restore = append(restore, pathpair{from: vendorbak, to: gw.vendor})
+			}
+		}
+
+		// move in the new one
+		failerr = os.Rename(filepath.Join(td, "vendor"), gw.vendor)
+		if failerr != nil {
+			fail = true
+		}
+	}
+
+	// If we failed at any point, move all the things back into place, then bail
+	if fail {
+		for _, pair := range restore {
+			// Nothing we can do on err here, we're already in recovery mode
+			os.Rename(pair.from, pair.to)
+		}
+		return failerr
+	}
+
+	// Renames all went smoothly. The deferred os.RemoveAll will get the temp
+	// dir, but if we wrote vendor, we have to clean that up directly
+
+	if writeVendor {
+		// Again, kinda nothing we can do about an error at this point
+		os.RemoveAll(vendorbak)
+	}
+
+	return nil
+}
+
+// loadLockfile loads the contents of a glide.lock file.
+func loadLockfile(base string, conf *cfg.Config) (*cfg.Lockfile, error) {
+	yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
+	if err != nil {
+		return nil, err
+	}
+	lock, err := cfg.LockfileFromYaml(yml)
+	if err != nil {
+		return nil, err
+	}
+
+	return lock, nil
 }
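
The write-then-swap choreography in writeAllSafe reduces to a small reusable pattern: stage the new artifact in a temp dir, move the old one aside, move the new one into place, and undo the moves on failure. A stdlib-only sketch for the single-file case (a hypothetical helper, simplified from the multi-artifact version above; os.Rename shares the cross-filesystem caveat the vendor-dir handling works around):

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// replaceFileSafely stages newContent in a temp dir, then swaps it into
// place, restoring the previous file if the final rename fails.
func replaceFileSafely(target string, newContent []byte) error {
	td, err := ioutil.TempDir("", "swap")
	if err != nil {
		return err
	}
	defer os.RemoveAll(td)

	staged := filepath.Join(td, "new")
	if err := ioutil.WriteFile(staged, newContent, 0666); err != nil {
		return err
	}

	// Move the old file aside so it can be restored on failure.
	backup := filepath.Join(td, "old")
	hadOld := false
	if _, err := os.Stat(target); err == nil {
		if err := os.Rename(target, backup); err != nil {
			return err
		}
		hadOld = true
	}

	if err := os.Rename(staged, target); err != nil {
		if hadOld {
			os.Rename(backup, target) // best-effort rollback
		}
		return err
	}
	return nil
}
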
diff --git a/action/name.go b/action/name.go
index 77972b3..7a67b7c 100644
--- a/action/name.go
+++ b/action/name.go
@@ -7,5 +7,5 @@
 // Name prints the name of the package, according to the glide.yaml file.
 func Name() {
 	conf := EnsureConfig()
-	msg.Puts(conf.Name)
+	msg.Puts(conf.ProjectRoot)
 }
diff --git a/action/project_info.go b/action/project_info.go
index 379026e..b82b41c 100644
--- a/action/project_info.go
+++ b/action/project_info.go
@@ -15,7 +15,7 @@
 		if varInit {
 			switch varfmt {
 			case 'n':
-				buffer.WriteString(conf.Name)
+				buffer.WriteString(conf.ProjectRoot)
 			case 'd':
 				buffer.WriteString(conf.Description)
 			case 'h':
diff --git a/action/update.go b/action/update.go
index 9712ebb..c008f98 100644
--- a/action/update.go
+++ b/action/update.go
@@ -1,137 +1,97 @@
 package action
 
 import (
-	"io/ioutil"
+	"log"
+	"os"
 	"path/filepath"
 
-	"github.com/Masterminds/glide/cache"
 	"github.com/Masterminds/glide/cfg"
 	"github.com/Masterminds/glide/dependency"
-	"github.com/Masterminds/glide/godep"
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/repo"
+	"github.com/sdboyer/gps"
 )
 
 // Update updates repos and the lock file from the main glide yaml.
-func Update(installer *repo.Installer, skipRecursive, strip, stripVendor bool) {
-	if installer.UseCache {
-		cache.SystemLock()
-	}
-
+func Update(installer *repo.Installer, stripVendor bool, projs []string) {
 	base := "."
 	EnsureGopath()
 	EnsureVendorDir()
 	conf := EnsureConfig()
 
-	// Try to check out the initial dependencies.
-	if err := installer.Checkout(conf); err != nil {
-		msg.Die("Failed to do initial checkout of config: %s", err)
-	}
-
-	// Set the versions for the initial dependencies so that resolved dependencies
-	// are rooted in the correct version of the base.
-	if err := repo.SetReference(conf, installer.ResolveTest); err != nil {
-		msg.Die("Failed to set initial config references: %s", err)
-	}
-
-	// Prior to resolving dependencies we need to start working with a clone
-	// of the conf because we'll be making real changes to it.
-	confcopy := conf.Clone()
-
-	if !skipRecursive {
-		// Get all repos and update them.
-		err := installer.Update(confcopy)
-		if err != nil {
-			msg.Die("Could not update packages: %s", err)
-		}
-
-		// Set references. There may be no remaining references to set since the
-		// installer set them as it went to make sure it parsed the right imports
-		// from the right version of the package.
-		msg.Info("Setting references for remaining imports")
-		if err := repo.SetReference(confcopy, installer.ResolveTest); err != nil {
-			msg.Err("Failed to set references: %s (Skip to cleanup)", err)
-		}
-	}
-
-	// Delete unused packages
-	if installer.DeleteUnused {
-		dependency.DeleteUnused(confcopy)
-	}
-
-	// Vendored cleanup
-	// VendoredCleanup. This should ONLY be run if UpdateVendored was specified.
-	// When stripping VCS happens this will happen as well. No need for double
-	// effort.
-	if installer.UpdateVendored && !strip {
-		repo.VendoredCleanup(confcopy)
-	}
-
-	// Write glide.yaml (Why? Godeps/GPM/GB?)
-	// I think we don't need to write a new Glide file because update should not
-	// change anything important. It will just generate information about
-	// transative dependencies, all of which belongs exclusively in the lock
-	// file, not the glide.yaml file.
 	// TODO(mattfarina): Detect when a new dependency has been added or removed
 	// from the project. A removed dependency should warn and an added dependency
 	// should be added to the glide.yaml file. See issue #193.
 
-	if stripVendor {
-		confcopy = godep.RemoveGodepSubpackages(confcopy)
+	// TODO might need a better way for discovering the root
+	vend, err := gpath.Vendor()
+	if err != nil {
+		msg.Die("Could not find the vendor dir: %s", err)
 	}
 
-	if !skipRecursive {
-		// Write lock
-		hash, err := conf.Hash()
-		if err != nil {
-			msg.Die("Failed to generate config hash. Unable to generate lock file.")
-		}
-		lock, err := cfg.NewLockfile(confcopy.Imports, confcopy.DevImports, hash)
-		if err != nil {
-			msg.Die("Failed to generate lock file: %s", err)
-		}
-		wl := true
-		if gpath.HasLock(base) {
-			yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
-			if err == nil {
-				l2, err := cfg.LockfileFromYaml(yml)
-				if err == nil {
-					f1, err := l2.Fingerprint()
-					f2, err2 := lock.Fingerprint()
-					if err == nil && err2 == nil && f1 == f2 {
-						wl = false
-					}
-				}
-			}
-		}
-		if wl {
-			if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
-				msg.Err("Could not write lock file to %s: %s", base, err)
-				return
-			}
-		} else {
-			msg.Info("Versions did not change. Skipping glide.lock update.")
-		}
+	params := gps.SolveParameters{
+		RootDir:     filepath.Dir(vend),
+		ImportRoot:  gps.ProjectRoot(conf.ProjectRoot),
+		Manifest:    conf,
+		Ignore:      conf.Ignore,
+		Trace:       true,
+		TraceLogger: log.New(os.Stdout, "", 0),
+	}
 
-		msg.Info("Project relies on %d dependencies.", len(confcopy.Imports))
+	if len(projs) == 0 {
+		params.ChangeAll = true
 	} else {
-		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
-	}
-
-	if strip {
-		msg.Info("Removing version control data from vendor directory...")
-		err := gpath.StripVcs()
-		if err != nil {
-			msg.Err("Unable to strip version control data: %s", err)
+		params.ChangeAll = false
+		for _, p := range projs {
+			if !conf.HasDependency(p) {
+				msg.Die("Cannot update %s, as it is not listed as dependency in glide.yaml.", p)
+			}
+			params.ToChange = append(params.ToChange, gps.ProjectRoot(p))
 		}
 	}
 
-	if stripVendor {
-		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
-		err := gpath.StripVendor()
+	if gpath.HasLock(base) {
+		params.Lock, err = loadLockfile(base, conf)
 		if err != nil {
-			msg.Err("Unable to strip vendor directories: %s", err)
+			msg.Err("Could not load lockfile, aborting: %s", err)
+			return
 		}
 	}
+
+	// Create the SourceManager for this run
+	sm, err := gps.NewSourceManager(dependency.Analyzer{}, filepath.Join(installer.Home, "cache"), false)
+	if err != nil {
+		msg.Err(err.Error())
+		return
+	}
+	defer sm.Release()
+
+	// Prepare a solver. This validates our params.
+	s, err := gps.Prepare(params, sm)
+	if err != nil {
+		msg.Err("Could not set up solver: %s", err)
+		return
+	}
+
+	r, err := s.Solve()
+	if err != nil {
+		// TODO better error handling
+		msg.Err(err.Error())
+		return
+	}
+
+	gw := safeGroupWriter{
+		resultLock:  r,
+		sm:          sm,
+		vendor:      vend,
+		stripVendor: stripVendor,
+	}
+	// params.Lock is nil when no lock file existed; a bare type assertion
+	// would panic in that case.
+	if lf, ok := params.Lock.(*cfg.Lockfile); ok {
+		gw.lock = lf
+	}
+
+	err = gw.writeAllSafe()
+	if err != nil {
+		msg.Err(err.Error())
+		return
+	}
 }
diff --git a/cfg/config.go b/cfg/config.go
index 115c76d..a9e6930 100644
--- a/cfg/config.go
+++ b/cfg/config.go
@@ -2,6 +2,7 @@
 
 import (
 	"crypto/sha256"
+	"encoding/hex"
 	"fmt"
 	"io/ioutil"
 	"reflect"
@@ -10,6 +11,7 @@
 
 	"github.com/Masterminds/glide/util"
 	"github.com/Masterminds/vcs"
+	"github.com/sdboyer/gps"
 	"gopkg.in/yaml.v2"
 )
 
@@ -17,7 +19,7 @@
 type Config struct {
 
-	// Name is the name of the package or application.
-	Name string `yaml:"package"`
+	// ProjectRoot is the name (import path) of the package or application.
+	ProjectRoot string `yaml:"package"`
 
 	// Description is a short description for a package, application, or library.
 	// This description is similar but different to a Go package description as
@@ -56,6 +58,19 @@
 }
 
-// A transitive representation of a dependency for importing and exporting to yaml.
+// cf1 is the new-style yaml representation of a glide.yaml file.
+type cf1 struct {
+	Name        string       `yaml:"package"`
+	Description string       `yaml:"description,omitempty"`
+	Home        string       `yaml:"homepage,omitempty"`
+	License     string       `yaml:"license,omitempty"`
+	Owners      Owners       `yaml:"owners,omitempty"`
+	Ignore      []string     `yaml:"ignore,omitempty"`
+	Exclude     []string     `yaml:"excludeDirs,omitempty"`
+	Imports     Dependencies `yaml:"dependencies"`
+	DevImports  Dependencies `yaml:"testdependencies,omitempty"`
+}
+
+// Legacy representation of a glide.yaml file.
 type cf struct {
 	Name        string       `yaml:"package"`
 	Description string       `yaml:"description,omitempty"`
@@ -90,7 +105,7 @@
 	if err := unmarshal(&newConfig); err != nil {
 		return err
 	}
-	c.Name = newConfig.Name
+	c.ProjectRoot = newConfig.Name
 	c.Description = newConfig.Description
 	c.Home = newConfig.Home
 	c.License = newConfig.License
@@ -109,7 +124,7 @@
 // MarshalYAML is a hook for gopkg.in/yaml.v2 in the marshaling process
 func (c *Config) MarshalYAML() (interface{}, error) {
 	newConfig := &cf{
-		Name:        c.Name,
+		Name:        c.ProjectRoot,
 		Description: c.Description,
 		Home:        c.Home,
 		License:     c.License,
@@ -148,6 +163,64 @@
 	return false
 }
 
+// DependencyConstraints lists all the non-test dependency constraints
+// described in a glide manifest in a way gps will understand.
+func (c *Config) DependencyConstraints() []gps.ProjectConstraint {
+	return depsToVSolver(c.Imports)
+}
+
+// TestDependencyConstraints lists all the test dependency constraints described
+// in a glide manifest in a way gps will understand.
+func (c *Config) TestDependencyConstraints() []gps.ProjectConstraint {
+	return depsToVSolver(c.DevImports)
+}
+
+func depsToVSolver(deps Dependencies) []gps.ProjectConstraint {
+	cp := make([]gps.ProjectConstraint, len(deps))
+	for k, d := range deps {
+		var c gps.Constraint
+		var err error
+
+		// Support both old and new. TODO handle this earlier
+		if d.Constraint != nil {
+			c = d.Constraint
+		} else {
+			// TODO need to differentiate types of constraints so that we don't have
+			// this ambiguity
+			// Try semver first
+			c, err = gps.NewSemverConstraint(d.Reference)
+			if err != nil {
+				// Not a semver constraint. Super crappy heuristic that'll cover hg
+				// and git revs, but not bzr (svn, you say? lol, madame. lol)
+				if len(d.Reference) == 40 {
+					c = gps.Revision(d.Reference)
+				} else {
+					// Otherwise, assume a branch. This also sucks, because it could
+					// very well be a shitty, non-semver tag.
+					c = gps.NewBranch(d.Reference)
+				}
+			}
+		}
+
+		id := gps.ProjectIdentifier{
+			ProjectRoot: gps.ProjectRoot(d.Name),
+			NetworkName: d.Repository,
+		}
+
+		cp[k] = gps.ProjectConstraint{
+			Ident:      id,
+			Constraint: c,
+		}
+	}
+
+	return cp
+}
+
+// Name returns the name of the project given in the manifest.
+func (c *Config) Name() gps.ProjectRoot {
+	return gps.ProjectRoot(c.ProjectRoot)
+}
+
 // HasIgnore returns true if the given name is listed on the ignore list.
 func (c *Config) HasIgnore(name string) bool {
 	for _, v := range c.Ignore {
@@ -177,7 +250,7 @@
 // Clone performs a deep clone of the Config instance
 func (c *Config) Clone() *Config {
 	n := &Config{}
-	n.Name = c.Name
+	n.ProjectRoot = c.ProjectRoot
 	n.Description = c.Description
 	n.Home = c.Home
 	n.License = c.License
@@ -218,7 +291,7 @@
 	// If the name on the config object is part of the imports remove it.
 	found := -1
 	for i, dep := range c.Imports {
-		if dep.Name == c.Name {
+		if dep.Name == c.ProjectRoot {
 			found = i
 		}
 	}
@@ -228,7 +301,7 @@
 
 	found = -1
 	for i, dep := range c.DevImports {
-		if dep.Name == c.Name {
+		if dep.Name == c.ProjectRoot {
 			found = i
 		}
 	}
@@ -368,21 +441,23 @@
 
 // Dependency describes a package that the present package depends upon.
 type Dependency struct {
-	Name             string   `yaml:"package"`
-	Reference        string   `yaml:"version,omitempty"`
-	Pin              string   `yaml:"-"`
-	Repository       string   `yaml:"repo,omitempty"`
-	VcsType          string   `yaml:"vcs,omitempty"`
-	Subpackages      []string `yaml:"subpackages,omitempty"`
-	Arch             []string `yaml:"arch,omitempty"`
-	Os               []string `yaml:"os,omitempty"`
-	UpdateAsVendored bool     `yaml:"-"`
+	Name             string         `yaml:"package"`
+	Constraint       gps.Constraint `yaml:"-"` // TODO temporary, for experimenting; reconcile with other data
+	Reference        string         `yaml:"version,omitempty"`
+	Pin              string         `yaml:"-"`
+	Repository       string         `yaml:"repo,omitempty"`
+	VcsType          string         `yaml:"vcs,omitempty"`
+	Subpackages      []string       `yaml:"subpackages,omitempty"`
+	Arch             []string       `yaml:"arch,omitempty"`
+	Os               []string       `yaml:"os,omitempty"`
+	UpdateAsVendored bool           `yaml:"-"`
 }
 
 // A transitive representation of a dependency for importing and exporting to yaml.
 type dep struct {
 	Name        string   `yaml:"package"`
 	Reference   string   `yaml:"version,omitempty"`
+	Branch      string   `yaml:"branch,omitempty"`
 	Ref         string   `yaml:"ref,omitempty"`
 	Repository  string   `yaml:"repo,omitempty"`
 	VcsType     string   `yaml:"vcs,omitempty"`
@@ -413,16 +488,45 @@
 	}
 	d.Name = newDep.Name
 	d.Reference = newDep.Reference
+
+	if d.Reference == "" && newDep.Ref != "" {
+		d.Reference = newDep.Ref
+	}
+
+	if d.Reference != "" {
+		r := d.Reference
+		// TODO this covers git & hg; bzr and svn (??) need love
+		if len(r) == 40 {
+			if _, err := hex.DecodeString(r); err == nil {
+				d.Constraint = gps.Revision(r)
+			}
+		} else {
+			d.Constraint, err = gps.NewSemverConstraint(r)
+			if err != nil {
+				d.Constraint = gps.NewVersion(r)
+			}
+		}
+
+		if err != nil {
+			return fmt.Errorf("Error on creating constraint for %q from %q: %s", d.Name, r, err)
+		}
+	} else if newDep.Branch != "" {
+		d.Constraint = gps.NewBranch(newDep.Branch)
+
+		if err != nil {
+			return fmt.Errorf("Error on creating constraint for %q from %q: %s", d.Name, newDep.Branch, err)
+		}
+	} else {
+		// TODO this is just for now - need a default branch constraint type
+		d.Constraint = gps.Any()
+	}
+
 	d.Repository = newDep.Repository
 	d.VcsType = newDep.VcsType
 	d.Subpackages = newDep.Subpackages
 	d.Arch = newDep.Arch
 	d.Os = newDep.Os
 
-	if d.Reference == "" && newDep.Ref != "" {
-		d.Reference = newDep.Ref
-	}
-
 	// Make sure only legitimate VCS are listed.
 	d.VcsType = filterVcsType(d.VcsType)
 
@@ -447,9 +551,9 @@
 
 	// Make sure we only write the correct vcs type to file
 	t := filterVcsType(d.VcsType)
+
 	newDep := &dep{
 		Name:        d.Name,
-		Reference:   d.Reference,
 		Repository:  d.Repository,
 		VcsType:     t,
 		Subpackages: d.Subpackages,
@@ -457,6 +561,27 @@
 		Os:          d.Os,
 	}
 
+	// Pull out the correct type of constraint
+	if v, ok := d.Constraint.(gps.Version); ok {
+		switch v.Type() {
+		case "any":
+			// Do nothing; the absence of constraint info is read back as 'any'
+		case "branch":
+			newDep.Branch = v.String()
+		case "revision", "semver", "version":
+			newDep.Reference = v.String()
+		}
+	} else if gps.IsAny(d.Constraint) {
+		// We do nothing here, as the way any gets represented is with no
+		// constraint information at all
+		// TODO for now, probably until we add first-class 'default branch'
+	} else if d.Constraint != nil {
+		// The only other thing this could really be is a semver range;
+		// String serializes it appropriately.
+		newDep.Reference = d.Constraint.String()
+	}
+	// Just ignore any other case
+
 	return newDep, nil
 }
 
@@ -497,6 +622,7 @@
 func (d *Dependency) Clone() *Dependency {
 	return &Dependency{
 		Name:             d.Name,
+		Constraint:       d.Constraint,
 		Reference:        d.Reference,
 		Pin:              d.Pin,
 		Repository:       d.Repository,
diff --git a/cfg/config_test.go b/cfg/config_test.go
index 6313ff0..bf6a190 100644
--- a/cfg/config_test.go
+++ b/cfg/config_test.go
@@ -48,8 +48,8 @@
 		t.Errorf("Unable to Unmarshal config yaml")
 	}
 
-	if cfg.Name != "fake/testing" {
-		t.Errorf("Inaccurate name found %s", cfg.Name)
+	if cfg.ProjectRoot != "fake/testing" {
+		t.Errorf("Inaccurate name found %s", cfg.ProjectRoot)
 	}
 
 	if cfg.Description != "foo bar baz" {
@@ -98,15 +98,15 @@
 	}
 
 	cfg2 := cfg.Clone()
-	if cfg2.Name != "fake/testing" {
+	if cfg2.ProjectRoot != "fake/testing" {
 		t.Error("Config cloning failed")
 	}
 	if cfg2.License != "MIT" {
 		t.Error("Config cloning failed to copy License")
 	}
-	cfg.Name = "foo"
+	cfg.ProjectRoot = "foo"
 
-	if cfg.Name == cfg2.Name {
+	if cfg.ProjectRoot == cfg2.ProjectRoot {
 		t.Error("Cloning Config name failed")
 	}
 }
@@ -117,7 +117,7 @@
 		t.Error("ConfigFromYaml failed to parse yaml")
 	}
 
-	if c.Name != "fake/testing" {
+	if c.ProjectRoot != "fake/testing" {
 		t.Error("ConfigFromYaml failed to properly parse yaml")
 	}
 }
diff --git a/cfg/lock.go b/cfg/lock.go
index 62d08ef..b822f29 100644
--- a/cfg/lock.go
+++ b/cfg/lock.go
@@ -2,12 +2,16 @@
 
 import (
 	"crypto/sha256"
+	"encoding/hex"
 	"fmt"
 	"io/ioutil"
 	"sort"
 	"strings"
 	"time"
 
+	"github.com/Masterminds/semver"
+	"github.com/sdboyer/gps"
+
 	"gopkg.in/yaml.v2"
 )
 
@@ -19,6 +23,42 @@
 	DevImports Locks     `yaml:"testImports"`
 }
 
+// LockfileFromSolverLock transforms a gps.Lock into a glide *Lockfile.
+func LockfileFromSolverLock(r gps.Lock) *Lockfile {
+	if r == nil {
+		return nil
+	}
+
+	// Create and write out a new lock file from the result
+	lf := &Lockfile{
+		Hash:    hex.EncodeToString(r.InputHash()),
+		Updated: time.Now(),
+	}
+
+	for _, p := range r.Projects() {
+		pi := p.Ident()
+		l := &Lock{
+			Name:    string(pi.ProjectRoot),
+			VcsType: "", // TODO allow this to be extracted from sm
+		}
+
+		if l.Name != pi.NetworkName && pi.NetworkName != "" {
+			l.Repository = pi.NetworkName
+		}
+
+		v := p.Version()
+		if pv, ok := v.(gps.PairedVersion); ok {
+			l.Version = pv.Underlying().String()
+		} else {
+			l.Version = v.String()
+		}
+
+		lf.Imports = append(lf.Imports, l)
+	}
+
+	return lf
+}
+
 // LockfileFromYaml returns an instance of Lockfile from YAML
 func LockfileFromYaml(yml []byte) (*Lockfile, error) {
 	lock := &Lockfile{}
@@ -28,6 +68,8 @@
 
 // Marshal converts a Config instance to YAML
 func (lf *Lockfile) Marshal() ([]byte, error) {
+	sort.Sort(lf.Imports)
+	sort.Sort(lf.DevImports)
 	yml, err := yaml.Marshal(&lf)
 	if err != nil {
 		return []byte{}, err
@@ -80,6 +122,45 @@
 	return ioutil.WriteFile(lockpath, o, 0666)
 }
 
+// InputHash returns the hash of the input arguments that resulted in this lock
+// file.
+func (lf *Lockfile) InputHash() []byte {
+	b, err := hex.DecodeString(lf.Hash)
+	if err != nil {
+		return nil
+	}
+	return b
+}
+
+// Projects returns the list of projects enumerated in the lock file.
+func (lf *Lockfile) Projects() []gps.LockedProject {
+	all := append(lf.Imports, lf.DevImports...)
+	lp := make([]gps.LockedProject, len(all))
+
+	for k, l := range all {
+		// TODO guess the version type. ugh
+		var v gps.Version
+
+		// semver first
+		_, err := semver.NewVersion(l.Version)
+		if err == nil {
+			v = gps.NewVersion(l.Version)
+		} else {
+			// Crappy heuristic to cover hg and git, but not bzr. Or (lol) svn
+			if len(l.Version) == 40 {
+				v = gps.Revision(l.Version)
+			} else {
+				// Otherwise, assume it's a branch
+				v = gps.NewBranch(l.Version)
+			}
+		}
+
+		lp[k] = gps.NewLockedProject(gps.ProjectRoot(l.Name), v, l.Repository, nil)
+	}
+
+	return lp
+}
+
 // Clone returns a clone of Lockfile
 func (lf *Lockfile) Clone() *Lockfile {
 	n := &Lockfile{}
diff --git a/dependency/analyzer.go b/dependency/analyzer.go
new file mode 100644
index 0000000..e4674fb
--- /dev/null
+++ b/dependency/analyzer.go
@@ -0,0 +1,169 @@
+package dependency
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/Masterminds/glide/cfg"
+	"github.com/Masterminds/glide/gb"
+	"github.com/Masterminds/glide/godep"
+	"github.com/Masterminds/glide/gom"
+	"github.com/Masterminds/glide/gpm"
+	gpath "github.com/Masterminds/glide/path"
+	"github.com/sdboyer/gps"
+)
+
+type notApplicable struct{}
+
+func (notApplicable) Error() string {
+	return ""
+}
+
+// Analyzer implements gps.ProjectAnalyzer. We inject the Analyzer into a
+// gps.SourceManager, and it reports manifest and lock information to the
+// SourceManager on request.
+type Analyzer struct{}
+
+// GetInfo reports the manifest and lock information for the project at root,
+// identified to the SourceManager as pn.
+func (a Analyzer) GetInfo(root string, pn gps.ProjectRoot) (gps.Manifest, gps.Lock, error) {
+	// this check should be unnecessary, but keeping it for now as a canary
+	if _, err := os.Lstat(root); err != nil {
+		return nil, nil, fmt.Errorf("No directory exists at %s; cannot produce ProjectInfo", root)
+	}
+
+	m, l, err := a.lookForGlide(root)
+	if err == nil {
+		// TODO verify project name is same as what SourceManager passed in?
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// The happy path of finding a glide manifest and/or lock file failed. Now,
+	// we begin our descent: we must attempt to divine just exactly *which*
+	// circle of hell we're in.
+
+	// Try godep first
+	m, l, err = a.lookForGodep(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// Next, gpm
+	m, l, err = a.lookForGPM(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// Next, gb
+	m, l, err = a.lookForGb(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// Next, gom
+	m, l, err = a.lookForGom(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// If none of our parsers matched, but none had actual errors, then we just
+	// go hands-off; gps itself will do the source analysis and use the Any
+	// constraint for all discovered packages.
+	return nil, nil, nil
+}
+
+func (a Analyzer) lookForGlide(root string) (gps.Manifest, gps.Lock, error) {
+	mpath := filepath.Join(root, gpath.GlideFile)
+	if _, err := os.Lstat(mpath); err != nil {
+		return nil, nil, notApplicable{}
+	}
+	// Manifest found, so from here on, we're locked in - a returned error will
+	// make it back to the SourceManager
+
+	yml, err := ioutil.ReadFile(mpath)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Error while reading glide manifest data: %s", root)
+	}
+
+	m, err := cfg.ConfigFromYaml(yml)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Error while parsing glide manifest data: %s", root)
+	}
+
+	// Manifest found, read, and parsed - we're on the happy path. Whether we
+	// find a lock or not, we will produce a valid result back to the
+	// SourceManager.
+	lpath := filepath.Join(root, gpath.LockFile)
+	if _, err := os.Lstat(lpath); err != nil {
+		return m, nil, nil
+	}
+
+	yml, err = ioutil.ReadFile(lpath)
+	if err != nil {
+		return m, nil, nil
+	}
+
+	l, err := cfg.LockfileFromYaml(yml)
+	if err != nil {
+		return m, nil, nil
+	}
+
+	return m, l, nil
+}
+
+func (a Analyzer) lookForGodep(root string) (gps.Manifest, gps.Lock, error) {
+	if !godep.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	d, l, err := godep.AsMetadataPair(root)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &cfg.Config{ProjectRoot: root, Imports: d}, l, nil
+}
+
+func (a Analyzer) lookForGPM(root string) (gps.Manifest, gps.Lock, error) {
+	if !gpm.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	d, l, err := gpm.AsMetadataPair(root)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &cfg.Config{ProjectRoot: root, Imports: d}, l, nil
+}
+
+func (a Analyzer) lookForGb(root string) (gps.Manifest, gps.Lock, error) {
+	if !gb.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	d, l, err := gb.AsMetadataPair(root)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &cfg.Config{ProjectRoot: root, Imports: d}, l, nil
+}
+
+func (a Analyzer) lookForGom(root string) (gps.Manifest, gps.Lock, error) {
+	if !gom.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	return gom.AsMetadataPair(root)
+}
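
Each detector above follows the same contract: a notApplicable error means "this tool's metadata is not present here, try the next detector", while any other error aborts the whole lookup. A hypothetical driver that makes that contract explicit (GetInfo above unrolls this loop by hand):

// firstApplicable probes detectors in order, stopping at the first one that
// applies or the first one that fails outright.
func firstApplicable(root string, detectors ...func(string) (gps.Manifest, gps.Lock, error)) (gps.Manifest, gps.Lock, error) {
	for _, detect := range detectors {
		m, l, err := detect(root)
		if err == nil {
			return m, l, nil
		}
		if _, ok := err.(notApplicable); !ok {
			// A real failure, not just a non-match; stop probing.
			return nil, nil, err
		}
	}
	// Nothing matched; returning all nils lets gps fall back to its own
	// source analysis with the Any constraint.
	return nil, nil, nil
}
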
diff --git a/gb/gb.go b/gb/gb.go
index 7837a42..ea64258 100644
--- a/gb/gb.go
+++ b/gb/gb.go
@@ -14,8 +14,8 @@
 // Has returns true if this dir has a GB-flavored manifest file.
 func Has(dir string) bool {
 	path := filepath.Join(dir, "vendor/manifest")
-	_, err := os.Stat(path)
-	return err == nil
+	fi, err := os.Stat(path)
+	return err == nil && !fi.IsDir()
 }
 
 // Parse parses a GB-flavored manifest file.
@@ -69,3 +69,42 @@
 	}
 	return buf, nil
 }
+
+// AsMetadataPair attempts to extract manifest and lock data from gb metadata.
+func AsMetadataPair(dir string) (m []*cfg.Dependency, l *cfg.Lockfile, err error) {
+	path := filepath.Join(dir, "vendor/manifest")
+	if _, err = os.Stat(path); err != nil {
+		return
+	}
+
+	file, err := os.Open(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	man := Manifest{}
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&man); err != nil {
+		return
+	}
+
+	l = &cfg.Lockfile{}
+	seen := map[string]bool{}
+
+	for _, d := range man.Dependencies {
+		pkg, _ := util.NormalizeName(d.Importpath)
+		if _, ok := seen[pkg]; !ok {
+			seen[pkg] = true
+			dep := &cfg.Dependency{
+				Name: pkg,
+				// TODO we have the branch info here - maybe we should use that
+				Reference:  "*",
+				Repository: d.Repository,
+			}
+			m = append(m, dep)
+			l.Imports = append(l.Imports, &cfg.Lock{Name: pkg, Version: d.Revision})
+		}
+	}
+	return
+}
diff --git a/glide.go b/glide.go
index 1190885..b7b2260 100644
--- a/glide.go
+++ b/glide.go
@@ -202,50 +202,18 @@
    will be removed when most Godeps users have migrated to using the vendor
    folder.`,
 			Flags: []cli.Flag{
-				cli.BoolFlag{
-					Name:  "test",
-					Usage: "Add test dependencies.",
-				},
-				cli.BoolFlag{
-					Name:  "insecure",
-					Usage: "Use http:// rather than https:// to retrieve pacakges.",
-				},
-				cli.BoolFlag{
-					Name:  "no-recursive, quick",
-					Usage: "Disable updating dependencies' dependencies.",
-				},
-				cli.BoolFlag{
-					Name:  "force",
-					Usage: "If there was a change in the repo or VCS switch to new one. Warning, changes will be lost.",
-				},
-				cli.BoolFlag{
-					Name:  "all-dependencies",
-					Usage: "This will resolve all dependencies for all packages, not just those directly used.",
-				},
-				cli.BoolFlag{
-					Name:  "update-vendored, u",
-					Usage: "Update vendored packages (without local VCS repo). Warning, changes will be lost.",
-				},
-				cli.BoolFlag{
-					Name:  "cache",
-					Usage: "When downloading dependencies attempt to cache them.",
-				},
-				cli.BoolFlag{
-					Name:  "cache-gopath",
-					Usage: "When downloading dependencies attempt to put them in the GOPATH, too.",
-				},
-				cli.BoolFlag{
-					Name:  "use-gopath",
-					Usage: "Copy dependencies from the GOPATH if they exist there.",
-				},
-				cli.BoolFlag{
-					Name:  "resolve-current",
-					Usage: "Resolve dependencies for only the current system rather than all build modes.",
-				},
-				cli.BoolFlag{
-					Name:  "strip-vcs, s",
-					Usage: "Removes version control metadata (e.g, .git directory) from the vendor folder.",
-				},
+				//cli.BoolFlag{
+				//Name:  "insecure",
+				//Usage: "Use http:// rather than https:// to retrieve packages.",
+				//},
+				//cli.BoolFlag{
+				//Name:  "cache-gopath",
+				//Usage: "When downloading dependencies attempt to put them in the GOPATH, too.",
+				//},
+				//cli.BoolFlag{
+				//Name:  "resolve-current",
+				//Usage: "Resolve dependencies for only the current system rather than all build modes.",
+				//},
 				cli.BoolFlag{
 					Name:  "strip-vendor, v",
 					Usage: "Removes nested vendor and Godeps/_workspace directories. Requires --strip-vcs.",
@@ -254,37 +222,25 @@
 					Name:  "non-interactive",
 					Usage: "Disable interactive prompts.",
 				},
-				cli.BoolFlag{
-					Name:  "skip-test",
-					Usage: "Resolve dependencies in test files.",
-				},
 			},
 			Action: func(c *cli.Context) {
-				if c.Bool("strip-vendor") && !c.Bool("strip-vcs") {
-					msg.Die("--strip-vendor cannot be used without --strip-vcs")
-				}
-
 				if len(c.Args()) < 1 {
 					fmt.Println("Oops! Package name is required.")
 					os.Exit(1)
 				}
 
-				if c.Bool("resolve-current") {
-					util.ResolveCurrent = true
-					msg.Warn("Only resolving dependencies for the current OS/Arch")
-				}
+				//if c.Bool("resolve-current") {
+				//util.ResolveCurrent = true
+				//msg.Warn("Only resolving dependencies for the current OS/Arch")
+				//}
 
 				inst := repo.NewInstaller()
-				inst.Force = c.Bool("force")
-				inst.UseCache = c.Bool("cache")
-				inst.UseGopath = c.Bool("use-gopath")
-				inst.UseCacheGopath = c.Bool("cache-gopath")
-				inst.UpdateVendored = c.Bool("update-vendored")
-				inst.ResolveAllFiles = c.Bool("all-dependencies")
-				inst.ResolveTest = !c.Bool("skip-test")
+				inst.Home = gpath.Home()
+				//inst.UseCacheGopath = c.Bool("cache-gopath")
+				//inst.ResolveAllFiles = c.Bool("all-dependencies")
 				packages := []string(c.Args())
-				insecure := c.Bool("insecure")
-				action.Get(packages, inst, insecure, c.Bool("no-recursive"), c.Bool("strip-vcs"), c.Bool("strip-vendor"), c.Bool("non-interactive"), c.Bool("test"))
+				//insecure := c.Bool("insecure")
+				action.Get(packages, inst, c.Bool("strip-vendor"), c.Bool("non-interactive"))
 			},
 		},
 		{
@@ -421,49 +377,27 @@
 			Name:      "install",
 			ShortName: "i",
 			Usage:     "Install a project's dependencies",
-			Description: `This uses the native VCS of each packages to install
-   the appropriate version. There are two ways a projects dependencies can
-   be installed. When there is a glide.yaml file defining the dependencies but
-   no lock file (glide.lock) the dependencies are installed using the "update"
-   command and a glide.lock file is generated pinning all dependencies. If a
-   glide.lock file is already present the dependencies are installed or updated
-   from the lock file.`,
+			Description: `This uses the native VCS of each package to install the appropriate version into
+   the vendor directory adjacent to glide.yaml. Installs are always performed
+   from a lock file, which contains pinned, immutable versions. If no lock file
+   exists, glide will compute one first, then run the install (unless
+   --install-only is passed).`,
 			Flags: []cli.Flag{
 				cli.BoolFlag{
-					Name:  "delete",
-					Usage: "Delete vendor packages not specified in config.",
+					Name:  "install-only",
+					Usage: "Install only if a glide.lock file already exists; otherwise, an error is thrown.",
 				},
 				cli.BoolFlag{
-					Name:  "force",
-					Usage: "If there was a change in the repo or VCS switch to new one. Warning: changes will be lost.",
-				},
-				cli.BoolFlag{
-					Name:  "update-vendored, u",
-					Usage: "Update vendored packages (without local VCS repo). Warning: this may destroy local modifications to vendor/.",
-				},
-				cli.StringFlag{
-					Name:  "file, f",
-					Usage: "Save all of the discovered dependencies to a Glide YAML file. (DEPRECATED: This has no impact.)",
-				},
-				cli.BoolFlag{
-					Name:  "cache",
-					Usage: "When downloading dependencies attempt to cache them.",
+					Name:  "synced-only",
+					Usage: "Install only if the glide.lock file is in sync with the glide.yaml, otherwise exit with an error. (Implies --install-only)",
 				},
 				cli.BoolFlag{
 					Name:  "cache-gopath",
 					Usage: "When downloading dependencies attempt to put them in the GOPATH, too.",
 				},
 				cli.BoolFlag{
-					Name:  "use-gopath",
-					Usage: "Copy dependencies from the GOPATH if they exist there.",
-				},
-				cli.BoolFlag{
-					Name:  "strip-vcs, s",
-					Usage: "Removes version control metadata (e.g, .git directory) from the vendor folder.",
-				},
-				cli.BoolFlag{
 					Name:  "strip-vendor, v",
-					Usage: "Removes nested vendor and Godeps/_workspace directories. Requires --strip-vcs.",
+					Usage: "Removes nested vendor and Godeps/_workspace directories.",
 				},
 				cli.BoolFlag{
 					Name:  "skip-test",
@@ -471,21 +405,11 @@
 				},
 			},
 			Action: func(c *cli.Context) {
-				if c.Bool("strip-vendor") && !c.Bool("strip-vcs") {
-					msg.Die("--strip-vendor cannot be used without --strip-vcs")
-				}
-
 				installer := repo.NewInstaller()
-				installer.Force = c.Bool("force")
-				installer.UseCache = c.Bool("cache")
-				installer.UseGopath = c.Bool("use-gopath")
 				installer.UseCacheGopath = c.Bool("cache-gopath")
-				installer.UpdateVendored = c.Bool("update-vendored")
-				installer.Home = c.GlobalString("home")
-				installer.DeleteUnused = c.Bool("delete")
-				installer.ResolveTest = !c.Bool("skip-test")
+				installer.Home = gpath.Home()
 
-				action.Install(installer, c.Bool("strip-vcs"), c.Bool("strip-vendor"))
+				action.Install(installer, c.Bool("install-only"), c.Bool("synced-only"), c.Bool("strip-vendor"))
 			},
 		},
 		{
@@ -493,9 +417,9 @@
 			ShortName: "up",
 			Usage:     "Update a project's dependencies",
 			Description: `This uses the native VCS of each package to try to
-   pull the most applicable updates. Packages with fixed refs (Versions or
-   tags) will not be updated. Packages with no ref or with a branch ref will
-   be updated as expected.
+   pull the most applicable updates. If no arguments are provided, then glide
+   will attempt to update all dependencies. If package names are provided, then
+   glide will attempt to find a solution where only those packages are changed.
 
    If a dependency has a glide.yaml file, update will read that file and
    update those dependencies accordingly. Those dependencies are maintained in
@@ -527,55 +451,24 @@
 
    By default, packages that are discovered are considered transient, and are
-   not stored in the glide.yaml file. The --file=NAME.yaml flag allows you
-   to save the discovered dependencies to a YAML file.`,
+   not stored in the glide.yaml file.
+   `,
 			Flags: []cli.Flag{
 				cli.BoolFlag{
-					Name:  "delete",
-					Usage: "Delete vendor packages not specified in config.",
-				},
-				cli.BoolFlag{
-					Name:  "no-recursive, quick",
-					Usage: "Disable updating dependencies' dependencies. Only update things in glide.yaml.",
-				},
-				cli.BoolFlag{
-					Name:  "force",
-					Usage: "If there was a change in the repo or VCS switch to new one. Warning, changes will be lost.",
-				},
-				cli.BoolFlag{
 					Name:  "all-dependencies",
 					Usage: "This will resolve all dependencies for all packages, not just those directly used.",
 				},
 				cli.BoolFlag{
-					Name:  "update-vendored, u",
-					Usage: "Update vendored packages (without local VCS repo). Warning, changes will be lost.",
-				},
-				cli.StringFlag{
-					Name:  "file, f",
-					Usage: "Save all of the discovered dependencies to a Glide YAML file.",
-				},
-				cli.BoolFlag{
-					Name:  "cache",
-					Usage: "When downloading dependencies attempt to cache them.",
-				},
-				cli.BoolFlag{
 					Name:  "cache-gopath",
 					Usage: "When downloading dependencies attempt to put them in the GOPATH, too.",
 				},
 				cli.BoolFlag{
-					Name:  "use-gopath",
-					Usage: "Copy dependencies from the GOPATH if they exist there.",
-				},
-				cli.BoolFlag{
 					Name:  "resolve-current",
 					Usage: "Resolve dependencies for only the current system rather than all build modes.",
 				},
 				cli.BoolFlag{
-					Name:  "strip-vcs, s",
-					Usage: "Removes version control metadata (e.g, .git directory) from the vendor folder.",
-				},
-				cli.BoolFlag{
 					Name:  "strip-vendor, v",
-					Usage: "Removes nested vendor and Godeps/_workspace directories. Requires --strip-vcs.",
+					Usage: "Removes nested vendor and Godeps/_workspace directories.",
 				},
 				cli.BoolFlag{
 					Name:  "skip-test",
@@ -583,27 +476,17 @@
 				},
 			},
 			Action: func(c *cli.Context) {
-				if c.Bool("strip-vendor") && !c.Bool("strip-vcs") {
-					msg.Die("--strip-vendor cannot be used without --strip-vcs")
-				}
-
 				if c.Bool("resolve-current") {
 					util.ResolveCurrent = true
 					msg.Warn("Only resolving dependencies for the current OS/Arch")
 				}
 
 				installer := repo.NewInstaller()
-				installer.Force = c.Bool("force")
-				installer.UseCache = c.Bool("cache")
-				installer.UseGopath = c.Bool("use-gopath")
 				installer.UseCacheGopath = c.Bool("cache-gopath")
-				installer.UpdateVendored = c.Bool("update-vendored")
 				installer.ResolveAllFiles = c.Bool("all-dependencies")
-				installer.Home = c.GlobalString("home")
-				installer.DeleteUnused = c.Bool("delete")
-				installer.ResolveTest = !c.Bool("skip-test")
+				installer.Home = gpath.Home()
 
-				action.Update(installer, c.Bool("no-recursive"), c.Bool("strip-vcs"), c.Bool("strip-vendor"))
+				action.Update(installer, c.Bool("strip-vendor"), []string(c.Args()))
 			},
 		},
 		{
diff --git a/glide.lock b/glide.lock
index c869e2f..49bed7b 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,12 +1,18 @@
-hash: 0653c17bcbf6f1df79990f3d2211dbcbc920ca528c513b00f5cab0a508c984ab
-updated: 2016-06-30T10:51:49.633776379-04:00
+hash: 7b0d46d2b21d5d8ff24023a402285f87b14b9f554ae52913cc7ea08bfd17453d
+updated: 2016-07-13T23:09:11.03428654-04:00
 imports:
+- name: github.com/armon/go-radix
+  version: 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
 - name: github.com/codegangsta/cli
   version: 71f57d300dd6a780ac1856c005c4b518cfd498ec
 - name: github.com/Masterminds/semver
-  version: 8d0431362b544d1a3536cca26684828866a7de09
+  version: b3ef6b1808e9889dfb8767ce7068db923a3d07de
 - name: github.com/Masterminds/vcs
   version: fbe9fb6ad5b5f35b3e82a7c21123cfc526cbf895
+- name: github.com/sdboyer/gps
+  version: a868c10855893c21ed05d0f50d6f9acb12b6366d
+- name: github.com/termie/go-shutil
+  version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
 - name: gopkg.in/yaml.v2
   version: a83829b6f1293c91addabc89d0571c246397bbf4
 testImports: []
diff --git a/glide.yaml b/glide.yaml
index dfc97e9..cb185ed 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -10,9 +10,12 @@
   homepage: https://www.mattfarina.com/
 import:
 - package: gopkg.in/yaml.v2
+  branch: v2
 - package: github.com/Masterminds/vcs
   version: ^1.8.0
 - package: github.com/codegangsta/cli
   version: ~1.14.0
 - package: github.com/Masterminds/semver
-  version: ^1.1.1
+  branch: 2.x
+- package: github.com/sdboyer/gps
+  branch: master
diff --git a/godep/godep.go b/godep/godep.go
index 71291b3..2b9ea41 100644
--- a/godep/godep.go
+++ b/godep/godep.go
@@ -99,6 +99,52 @@
 	return buf, nil
 }
 
+// AsMetadataPair attempts to extract manifest and lock data from the
+// Godeps/Godeps.json metadata in dir.
+func AsMetadataPair(dir string) ([]*cfg.Dependency, *cfg.Lockfile, error) {
+	path := filepath.Join(dir, "Godeps/Godeps.json")
+	if _, err := os.Stat(path); err != nil {
+		return nil, nil, err
+	}
+
+	var m []*cfg.Dependency
+	l := &cfg.Lockfile{}
+	godeps := &Godeps{}
+
+	// Get a handle to the file.
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer file.Close()
+
+	dec := json.NewDecoder(file)
+	if err := dec.Decode(godeps); err != nil {
+		return nil, nil, err
+	}
+
+	seen := map[string]bool{}
+	for _, d := range godeps.Deps {
+		pkg, _ := util.NormalizeName(d.ImportPath)
+		if _, ok := seen[pkg]; !ok {
+			seen[pkg] = true
+
+			// Place no real *actual* constraint on the project; instead, we
+			// rely on gps using the 'preferred' version mechanism by
+			// working from the lock file. Without this, users would end up with
+			// the same mind-numbing diamond dep problems as currently exist.
+			// This approach does make for an uncomfortably wide possibility
+			// space where deps aren't getting what they expect, but that's
+			// better than just having the solver give up completely.
+			m = append(m, &cfg.Dependency{Name: pkg, Reference: "*"})
+			l.Imports = append(l.Imports, &cfg.Lock{Name: pkg, Version: d.Rev})
+
+			// TODO this fails to differentiate between dev and non-dev imports;
+			// need static analysis for that
+		}
+	}
+
+	return m, l, nil
+}
+
 // RemoveGodepSubpackages strips subpackages from a cfg.Config dependencies that
 // contain "Godeps/_workspace/src" as part of the path.
 func RemoveGodepSubpackages(c *cfg.Config) *cfg.Config {
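
As an illustrative aside, here is a minimal sketch (not part of the patch) of
how a caller could consume the new conversion; the use of the current
directory and the printing are assumptions:

	package main

	import (
		"fmt"
		"log"

		"github.com/Masterminds/glide/godep"
	)

	func main() {
		// Convert a project's Godeps/Godeps.json into manifest
		// dependencies and lock entries.
		deps, lock, err := godep.AsMetadataPair(".")
		if err != nil {
			log.Fatal(err)
		}

		// Manifest entries carry the wide-open "*" reference; the
		// concrete revisions live only in the lock.
		for _, d := range deps {
			fmt.Println("manifest:", d.Name, d.Reference)
		}
		for _, l := range lock.Imports {
			fmt.Println("lock:", l.Name, l.Version)
		}
	}
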
diff --git a/gom/gom.go b/gom/gom.go
index 51910b0..05785e6 100644
--- a/gom/gom.go
+++ b/gom/gom.go
@@ -9,6 +9,7 @@
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/util"
+	"github.com/sdboyer/gps"
 )
 
 // Has returns true if this dir has a Gomfile.
@@ -90,6 +91,94 @@
 	return buf, nil
 }
 
+// AsMetadataPair attempts to extract manifest and lock data from gom metadata.
+func AsMetadataPair(dir string) (gps.Manifest, gps.Lock, error) {
+	path := filepath.Join(dir, "Gomfile")
+	if _, err := os.Stat(path); err != nil {
+		return nil, nil, err
+	}
+
+	goms, err := parseGomfile(path)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var l gps.SimpleLock
+	m := gps.SimpleManifest{}
+
+	for _, gom := range goms {
+		// Do we need to skip this dependency?
+		if val, ok := gom.options["skipdep"]; ok && val.(string) == "true" {
+			continue
+		}
+
+		// Check for custom cloning command
+		if _, ok := gom.options["command"]; ok {
+			return nil, nil, errors.New("Glide does not support custom Gomfile commands")
+		}
+
+		// Check for groups/environments
+		if val, ok := gom.options["group"]; ok {
+			groups := toStringSlice(val)
+			if !stringsContain(groups, "development") && !stringsContain(groups, "production") {
+				// right now we only support development and production
+				continue
+			}
+		}
+
+		pkg, _ := util.NormalizeName(gom.name)
+
+		dep := gps.ProjectConstraint{
+			Ident: gps.ProjectIdentifier{
+				ProjectRoot: gps.ProjectRoot(pkg),
+			},
+		}
+
+		// Our order of preference for things to put in the manifest are
+		//   - Semver
+		//   - Version
+		//   - Branch
+		//   - Revision
+
+		var v gps.UnpairedVersion
+		if val, ok := gom.options["tag"]; ok {
+			body := val.(string)
+			v = gps.NewVersion(body)
+			c, err := gps.NewSemverConstraint(body)
+			if err != nil {
+				c = gps.NewVersion(body)
+			}
+			dep.Constraint = c
+		} else if val, ok := gom.options["branch"]; ok {
+			body := val.(string)
+			v = gps.NewBranch(body)
+			dep.Constraint = gps.NewBranch(body)
+		}
+
+		if val, ok := gom.options["commit"]; ok {
+			body := val.(string)
+			if v != nil {
+				// Is pairs the version with the revision; it returns the
+				// paired version rather than mutating the receiver.
+				pv := v.Is(gps.Revision(body))
+				l = append(l, gps.NewLockedProject(gps.ProjectRoot(pkg), pv, pkg, nil))
+			} else {
+				// As with the other third-party system integrations, we're
+				// going to choose not to put revisions into a manifest, even
+				// though gom has a lot more information than most and the
+				// argument could be made for it.
+				dep.Constraint = gps.Any()
+				l = append(l, gps.NewLockedProject(gps.ProjectRoot(pkg), gps.Revision(body), pkg, nil))
+			}
+		} else if v != nil {
+			// This is kinda uncomfortable - a lock with no immutable
+			// revision - but OK
+			l = append(l, gps.NewLockedProject(gps.ProjectRoot(pkg), v, pkg, nil))
+		}
+
+		// TODO We ignore GOOS, GOARCH for now
+
+		// Record the constraint in the manifest. A Gomfile entry that
+		// carried no usable version data is treated as unconstrained.
+		// (Assumes gps.SimpleManifest exposes its dependency list as Deps.)
+		if dep.Constraint == nil {
+			dep.Constraint = gps.Any()
+		}
+		m.Deps = append(m.Deps, dep)
+	}
+
+	return m, l, nil
+}
+
 func stringsContain(v []string, key string) bool {
 	for _, s := range v {
 		if s == key {
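
To make the tag-handling preference order concrete, a small sketch follows;
the pickConstraint helper is hypothetical and not part of the patch:

	package main

	import (
		"fmt"

		"github.com/sdboyer/gps"
	)

	// pickConstraint mirrors the order used above: try the tag as a
	// semver range first, and only fall back to an exact version
	// string if it does not parse.
	func pickConstraint(tag string) gps.Constraint {
		if c, err := gps.NewSemverConstraint(tag); err == nil {
			return c
		}
		return gps.NewVersion(tag)
	}

	func main() {
		fmt.Println(pickConstraint("^1.2.0"))   // a semver range
		fmt.Println(pickConstraint("some-tag")) // an exact, non-semver version
	}
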
diff --git a/gpm/gpm.go b/gpm/gpm.go
index e58a81d..00ca864 100644
--- a/gpm/gpm.go
+++ b/gpm/gpm.go
@@ -5,6 +5,7 @@
 
 import (
 	"bufio"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
@@ -58,6 +59,41 @@
 	return buf, nil
 }
 
+// AsMetadataPair attempts to extract manifest and lock data from a gpm
+// Godeps file.
+func AsMetadataPair(dir string) ([]*cfg.Dependency, *cfg.Lockfile, error) {
+	path := filepath.Join(dir, "Godeps")
+	if i, err := os.Stat(path); err != nil {
+		return nil, nil, err
+	} else if i.IsDir() {
+		return nil, nil, fmt.Errorf("found a Godeps directory where a Godeps file was expected")
+	}
+
+	var m []*cfg.Dependency
+	l := &cfg.Lockfile{}
+
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	scanner := bufio.NewScanner(file)
+	for scanner.Scan() {
+		parts, ok := parseGodepsLine(scanner.Text())
+		if ok {
+			// Place no actual constraint on the project; rely instead on
+			// gps's 'preferred version' reasoning from deps' lock
+			// files...if we have one at all.
+			if len(parts) > 1 {
+				l.Imports = append(l.Imports, &cfg.Lock{Name: parts[0], Version: parts[1]})
+			}
+			m = append(m, &cfg.Dependency{Name: parts[0], Reference: "*"})
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, nil, err
+	}
+
+	return m, l, nil
+}
+
 func parseGodepsLine(line string) ([]string, bool) {
 	line = strings.TrimSpace(line)
 
diff --git a/repo/installer.go b/repo/installer.go
index 5fd8562..0d6a335 100644
--- a/repo/installer.go
+++ b/repo/installer.go
@@ -90,7 +90,7 @@
 	// Create a config setup based on the Lockfile data to process with
 	// existing commands.
 	newConf := &cfg.Config{}
-	newConf.Name = conf.Name
+	newConf.ProjectRoot = conf.ProjectRoot
 
 	newConf.Imports = make(cfg.Dependencies, len(lock.Imports))
 	for k, v := range lock.Imports {
@@ -449,7 +449,7 @@
 func (m *MissingPackageHandler) NotFound(pkg string, addTest bool) (bool, error) {
 	root := util.GetRootFromPackage(pkg)
 	// Skip any references to the root package.
-	if root == m.Config.Name {
+	if root == m.Config.ProjectRoot {
 		return false, nil
 	}
 
@@ -516,7 +516,7 @@
 	root := util.GetRootFromPackage(pkg)
 
 	// Skip any references to the root package.
-	if root == m.Config.Name {
+	if root == m.Config.ProjectRoot {
 		return false, nil
 	}
 
@@ -546,7 +546,7 @@
 func (m *MissingPackageHandler) InVendor(pkg string, addTest bool) error {
 	root := util.GetRootFromPackage(pkg)
 	// Skip any references to the root package.
-	if root == m.Config.Name {
+	if root == m.Config.ProjectRoot {
 		return nil
 	}
 
@@ -606,7 +606,7 @@
 	root := util.GetRootFromPackage(pkg)
 
 	// Skip any references to the root package.
-	if root == d.Config.Name {
+	if root == d.Config.ProjectRoot {
 		return nil
 	}
 
@@ -643,7 +643,7 @@
 	root := util.GetRootFromPackage(pkg)
 
 	// Skip any references to the root package.
-	if root == d.Config.Name {
+	if root == d.Config.ProjectRoot {
 		return nil
 	}
 
@@ -750,7 +750,7 @@
 			return v
 		}
 
-		if con.Check(ver) {
+		if con.Matches(ver) == nil {
 			singleInfo("Keeping %s %s because it fits constraint '%s'", v.Name, v.Reference, dep.Reference)
 			return v
 		}
@@ -774,7 +774,7 @@
 			return v
 		}
 
-		if con.Check(ver) {
+		if con.Matches(ver) == nil {
 			v.Reference = dep.Reference
 			singleInfo("Using %s %s because it fits constraint '%s'", v.Name, v.Reference, v.Reference)
 			return v
diff --git a/repo/vcs.go b/repo/vcs.go
index 3f5f80d..ba84ca2 100644
--- a/repo/vcs.go
+++ b/repo/vcs.go
@@ -234,7 +234,7 @@
 			sort.Sort(sort.Reverse(semver.Collection(semvers)))
 			found := false
 			for _, v := range semvers {
-				if constraint.Check(v) {
+				if constraint.Matches(v) == nil {
 					found = true
 					// If the constraint passes, get the original reference
 					ver = v.Original()
diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md
index c3808ea..2555067 100644
--- a/vendor/github.com/Masterminds/semver/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md
@@ -1,13 +1,9 @@
-# Release 1.1.1 (2016-06-30)
+# Release 1.x.x (xxxx-xx-xx)
 
-## Changed
 - Issue #9: Speed up version comparison performance (thanks @sdboyer)
 - Issue #8: Added benchmarks (thanks @sdboyer)
-- Updated Go Report Card URL to new location
-- Updated Readme to add code snippet formatting (thanks @mh-cbon)
-- Updating tagging to v[SemVer] structure for compatibility with other tools.
 
-# Release 1.1.0 (2016-03-11)
+# Release 1.1.0 (2015-03-11)
 
 - Issue #2: Implemented validation to provide reasons a version failed a
   constraint.
diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md
index 1edec7a..aa133ea 100644
--- a/vendor/github.com/Masterminds/semver/README.md
+++ b/vendor/github.com/Masterminds/semver/README.md
@@ -7,15 +7,13 @@
 * Check if a semantic version fits within a set of constraints
 * Optionally work with a `v` prefix
 
-[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.png)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.png)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](http://goreportcard.com/badge/Masterminds/semver)](http://goreportcard.com/report/Masterminds/semver)
 
 ## Parsing Semantic Versions
 
 To parse a semantic version use the `NewVersion` function. For example,
 
-```go
     v, err := semver.NewVersion("1.2.3-beta.1+build345")
-```
 
 If there is an error the version wasn't parseable. The version object has methods
 to get the parts of the version, compare it to other versions, convert the
@@ -27,7 +25,6 @@
 A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
 package from the standard library. For example,
 
-```go
     raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
     vs := make([]*semver.Version, len(raw))
 	for i, r := range raw {
@@ -40,14 +37,12 @@
 	}
 
 	sort.Sort(semver.Collection(vs))
-```
 
 ## Checking Version Constraints
 
 Checking a version against version constraints is one of the most featureful
 parts of the package.
 
-```go
     c, err := semver.NewConstraint(">= 1.2.3")
     if err != nil {
         // Handle constraint not being parseable.
@@ -59,7 +54,6 @@
     }
     // Check if the version meets the constraints. The a variable will be true.
     a := c.Check(v)
-```
 
 ## Basic Comparisons
 
@@ -125,7 +119,6 @@
 against a constraint. When validation fails a slice of errors containing why a
 version didn't meet the constraint is returned. For example,
 
-```go
     c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
     if err != nil {
         // Handle constraint not being parseable.
@@ -146,7 +139,6 @@
         // "1.3 is greater than 1.2.3"
         // "1.3 is less than 1.4"
     }
-```
 
 # Contribute
 
diff --git a/vendor/github.com/Masterminds/semver/benchmark_test.go b/vendor/github.com/Masterminds/semver/benchmark_test.go
index 58a5c28..5a76f6a 100644
--- a/vendor/github.com/Masterminds/semver/benchmark_test.go
+++ b/vendor/github.com/Masterminds/semver/benchmark_test.go
@@ -1,16 +1,53 @@
-package semver_test
+package semver
 
-import (
-	"testing"
+import "testing"
 
-	"github.com/Masterminds/semver"
+func init() {
+	// disable constraint and version creation caching
+	CacheConstraints = false
+	CacheVersions = false
+}
+
+var (
+	rc1 = rangeConstraint{
+		min:        newV(1, 5, 0),
+		max:        newV(2, 0, 0),
+		includeMax: true,
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	rc3 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc4 = rangeConstraint{
+		min: newV(1, 7, 0),
+		max: newV(4, 0, 0),
+	}
+	rc5 = rangeConstraint{
+		min: newV(2, 7, 0),
+		max: newV(3, 0, 0),
+	}
+	rc6 = rangeConstraint{
+		min: newV(3, 0, 1),
+		max: newV(3, 0, 4),
+	}
+	rc7 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(1, 2, 0),
+	}
+	// Two fully non-overlapping unions
+	u1 = rc1.Union(rc7)
+	u2 = rc5.Union(rc6)
 )
 
 /* Constraint creation benchmarks */
 
 func benchNewConstraint(c string, b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		semver.NewConstraint(c)
+		NewConstraint(c)
 	}
 }
 
@@ -38,52 +75,17 @@
 	benchNewConstraint("~2.0.0 || =3.1.0", b)
 }
 
-/* Check benchmarks */
-
-func benchCheckVersion(c, v string, b *testing.B) {
-	version, _ := semver.NewVersion(v)
-	constraint, _ := semver.NewConstraint(c)
-
-	for i := 0; i < b.N; i++ {
-		constraint.Check(version)
-	}
-}
-
-func BenchmarkCheckVersionUnary(b *testing.B) {
-	benchCheckVersion("=2.0", "2.0.0", b)
-}
-
-func BenchmarkCheckVersionTilde(b *testing.B) {
-	benchCheckVersion("~2.0.0", "2.0.5", b)
-}
-
-func BenchmarkCheckVersionCaret(b *testing.B) {
-	benchCheckVersion("^2.0.0", "2.1.0", b)
-}
-
-func BenchmarkCheckVersionWildcard(b *testing.B) {
-	benchCheckVersion("1.x", "1.4.0", b)
-}
-
-func BenchmarkCheckVersionRange(b *testing.B) {
-	benchCheckVersion(">=2.1.x, <3.1.0", "2.4.5", b)
-}
-
-func BenchmarkCheckVersionUnion(b *testing.B) {
-	benchCheckVersion("~2.0.0 || =3.1.0", "3.1.0", b)
-}
+/* Validate benchmarks, including fails */
 
 func benchValidateVersion(c, v string, b *testing.B) {
-	version, _ := semver.NewVersion(v)
-	constraint, _ := semver.NewConstraint(c)
+	version, _ := NewVersion(v)
+	constraint, _ := NewConstraint(c)
 
 	for i := 0; i < b.N; i++ {
-		constraint.Validate(version)
+		constraint.Matches(version)
 	}
 }
 
-/* Validate benchmarks, including fails */
-
 func BenchmarkValidateVersionUnary(b *testing.B) {
 	benchValidateVersion("=2.0", "2.0.0", b)
 }
@@ -136,7 +138,7 @@
 
 func benchNewVersion(v string, b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		semver.NewVersion(v)
+		NewVersion(v)
 	}
 }
 
@@ -155,3 +157,103 @@
 func BenchmarkNewVersionMetaDash(b *testing.B) {
 	benchNewVersion("1.0.0+metadata-dash", b)
 }
+
+/* Union benchmarks */
+
+func BenchmarkAdjacentRangeUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(rc1, rc2)
+	}
+}
+
+func BenchmarkAdjacentRangeUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc1.Union(rc2)
+	}
+}
+
+func BenchmarkDisjointRangeUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(rc2, rc3)
+	}
+}
+
+func BenchmarkDisjointRangeUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc2.Union(rc3)
+	}
+}
+
+func BenchmarkOverlappingRangeUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(rc1, rc4)
+	}
+}
+
+func BenchmarkOverlappingRangeUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc1.Union(rc4)
+	}
+}
+
+func BenchmarkUnionUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(u1, u2)
+	}
+}
+
+func BenchmarkUnionUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		u1.Union(u2)
+	}
+}
+
+/* Intersection benchmarks */
+
+func BenchmarkSubsetRangeIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(rc2, rc4)
+	}
+}
+
+func BenchmarkSubsetRangeIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc2.Intersect(rc4)
+	}
+}
+
+func BenchmarkDisjointRangeIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(rc2, rc3)
+	}
+}
+
+func BenchmarkDisjointRangeIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc2.Intersect(rc3)
+	}
+}
+
+func BenchmarkOverlappingRangeIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(rc1, rc4)
+	}
+}
+
+func BenchmarkOverlappingRangeIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc1.Intersect(rc4)
+	}
+}
+
+func BenchmarkUnionIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(u1, u2)
+	}
+}
+
+func BenchmarkUnionIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		u1.Intersect(u2)
+	}
+}
diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go
index b63f5f6..bf2f500 100644
--- a/vendor/github.com/Masterminds/semver/constraints.go
+++ b/vendor/github.com/Masterminds/semver/constraints.go
@@ -1,126 +1,39 @@
 package semver
 
 import (
-	"errors"
 	"fmt"
 	"regexp"
+	"sort"
 	"strings"
+	"sync"
 )
 
-// Constraints is one or more constraint that a semantic version can be
-// checked against.
-type Constraints struct {
-	constraints [][]*constraint
-}
-
-// NewConstraint returns a Constraints instance that a Version instance can
-// be checked against. If there is a parse error it will be returned.
-func NewConstraint(c string) (*Constraints, error) {
-
-	// Rewrite - ranges into a comparison operation.
-	c = rewriteRange(c)
-
-	ors := strings.Split(c, "||")
-	or := make([][]*constraint, len(ors))
-	for k, v := range ors {
-		cs := strings.Split(v, ",")
-		result := make([]*constraint, len(cs))
-		for i, s := range cs {
-			pc, err := parseConstraint(s)
-			if err != nil {
-				return nil, err
-			}
-
-			result[i] = pc
-		}
-		or[k] = result
-	}
-
-	o := &Constraints{constraints: or}
-	return o, nil
-}
-
-// Check tests if a version satisfies the constraints.
-func (cs Constraints) Check(v *Version) bool {
-	// loop over the ORs and check the inner ANDs
-	for _, o := range cs.constraints {
-		joy := true
-		for _, c := range o {
-			if !c.check(v) {
-				joy = false
-				break
-			}
-		}
-
-		if joy {
-			return true
-		}
-	}
-
-	return false
-}
-
-// Validate checks if a version satisfies a constraint. If not a slice of
-// reasons for the failure are returned in addition to a bool.
-func (cs Constraints) Validate(v *Version) (bool, []error) {
-	// loop over the ORs and check the inner ANDs
-	var e []error
-	for _, o := range cs.constraints {
-		joy := true
-		for _, c := range o {
-			if !c.check(v) {
-				em := fmt.Errorf(c.msg, v, c.orig)
-				e = append(e, em)
-				joy = false
-			}
-		}
-
-		if joy {
-			return true, []error{}
-		}
-	}
-
-	return false, e
-}
-
-var constraintOps map[string]cfunc
-var constraintMsg map[string]string
 var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
 
 func init() {
-	constraintOps = map[string]cfunc{
-		"":   constraintTildeOrEqual,
-		"=":  constraintTildeOrEqual,
-		"!=": constraintNotEqual,
-		">":  constraintGreaterThan,
-		"<":  constraintLessThan,
-		">=": constraintGreaterThanEqual,
-		"=>": constraintGreaterThanEqual,
-		"<=": constraintLessThanEqual,
-		"=<": constraintLessThanEqual,
-		"~":  constraintTilde,
-		"~>": constraintTilde,
-		"^":  constraintCaret,
-	}
-
-	constraintMsg = map[string]string{
-		"":   "%s is not equal to %s",
-		"=":  "%s is not equal to %s",
-		"!=": "%s is equal to %s",
-		">":  "%s is less than or equal to %s",
-		"<":  "%s is greater than or equal to %s",
-		">=": "%s is less than %s",
-		"=>": "%s is less than %s",
-		"<=": "%s is greater than %s",
-		"=<": "%s is greater than %s",
-		"~":  "%s does not have same major and minor version as %s",
-		"~>": "%s does not have same major and minor version as %s",
-		"^":  "%s does not have same major version as %s",
+	constraintOps := []string{
+		"",
+		"=",
+		"!=",
+		">",
+		"<",
+		">=",
+		"=>",
+		"<=",
+		"=<",
+		"~",
+		"~>",
+		"^",
 	}
 
 	ops := make([]string, 0, len(constraintOps))
-	for k := range constraintOps {
-		ops = append(ops, regexp.QuoteMeta(k))
+	for _, op := range constraintOps {
+		ops = append(ops, regexp.QuoteMeta(op))
 	}
 
 	constraintRegex = regexp.MustCompile(fmt.Sprintf(
@@ -133,210 +46,250 @@
 		cvRegex, cvRegex))
 }
 
-// An individual constraint
-type constraint struct {
-	// The callback function for the restraint. It performs the logic for
-	// the constraint.
-	function cfunc
+type Constraint interface {
+	// Constraints embed the fmt.Stringer interface. Printing a constraint
+	// will yield a string that, if passed to NewConstraint(), will produce the
+	// original constraint. (Bidirectional serialization)
+	fmt.Stringer
 
-	msg string
+	// Matches checks that a version satisfies the constraint. If it does not,
+	// an error is returned indicating the problem; if it does, the error is nil.
+	Matches(v *Version) error
 
-	// The version used in the constraint check. For example, if a constraint
-	// is '<= 2.0.0' the con a version instance representing 2.0.0.
-	con *Version
+	// Intersect computes the intersection between the receiving Constraint and
+	// passed Constraint, and returns a new Constraint representing the result.
+	Intersect(Constraint) Constraint
 
-	// The original parsed version (e.g., 4.x from != 4.x)
-	orig string
+	// Union computes the union between the receiving Constraint and the passed
+	// Constraint, and returns a new Constraint representing the result.
+	Union(Constraint) Constraint
 
-	// When an x is used as part of the version (e.g., 1.x)
-	minorDirty bool
-	dirty      bool
+	// MatchesAny returns a bool indicating whether there exists any version that
+	// satisfies both the receiver constraint, and the passed Constraint.
+	//
+	// In other words, this reports whether an intersection would be non-empty.
+	MatchesAny(Constraint) bool
+
+	// Restrict implementation of this interface to this package. We need the
+	// flexibility of an interface, but we cover all possibilities here; closing
+	// off the interface to external implementations lets us safely use type
+	// tricks for the magic types (none and any).
+	_private()
 }
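
A brief sketch of the new interface in use; the constraint strings are
arbitrary, and the commented outputs assume the serialization rules
documented above:

	package main

	import (
		"fmt"

		"github.com/Masterminds/semver"
	)

	func main() {
		c1, _ := semver.NewConstraint(">=1.1.0, <2.0.0")
		c2, _ := semver.NewConstraint(">=1.5.0")
		v, _ := semver.NewVersion("1.2.3")

		// Matches replaces the old Check/Validate pair: nil means the
		// version is admissible; a non-nil error explains the rejection.
		fmt.Println(c1.Matches(v)) // <nil>

		fmt.Println(c1.Intersect(c2))  // >=1.5.0, <2.0.0
		fmt.Println(c1.Union(c2))      // >=1.1.0
		fmt.Println(c1.MatchesAny(c2)) // true
	}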
 
-// Check if a version meets the constraint
-func (c *constraint) check(v *Version) bool {
-	return c.function(v, c)
+// realConstraint is used internally to differentiate between any, none, and
+// unionConstraints, vs. Version and rangeConstraints.
+type realConstraint interface {
+	Constraint
+	_real()
 }
 
-type cfunc func(v *Version, c *constraint) bool
+// Controls whether or not parsed constraints are cached
+var CacheConstraints = true
+var constraintCache = make(map[string]ccache)
+var constraintCacheLock sync.RWMutex
 
-func parseConstraint(c string) (*constraint, error) {
-	m := constraintRegex.FindStringSubmatch(c)
-	if m == nil {
-		return nil, fmt.Errorf("improper constraint: %s", c)
-	}
-
-	ver := m[2]
-	orig := ver
-	minorDirty := false
-	dirty := false
-	if isX(m[3]) {
-		ver = "0.0.0"
-		dirty = true
-	} else if isX(strings.TrimPrefix(m[4], ".")) {
-		minorDirty = true
-		dirty = true
-		ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
-	} else if isX(strings.TrimPrefix(m[5], ".")) {
-		dirty = true
-		ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
-	}
-
-	con, err := NewVersion(ver)
-	if err != nil {
-
-		// The constraintRegex should catch any regex parsing errors. So,
-		// we should never get here.
-		return nil, errors.New("constraint Parser Error")
-	}
-
-	cs := &constraint{
-		function:   constraintOps[m[1]],
-		msg:        constraintMsg[m[1]],
-		con:        con,
-		orig:       orig,
-		minorDirty: minorDirty,
-		dirty:      dirty,
-	}
-	return cs, nil
+type ccache struct {
+	c   Constraint
+	err error
 }
 
-// Constraint functions
-func constraintNotEqual(v *Version, c *constraint) bool {
-	if c.dirty {
-		if c.con.Major() != v.Major() {
-			return true
+// NewConstraint takes a string representing a set of semver constraints, and
+// returns a corresponding Constraint object. Constraints are suitable
+// for checking Versions for admissibility, or combining with other Constraint
+// objects.
+//
+// If an invalid constraint string is passed, more information is provided in
+// the returned error string.
+func NewConstraint(in string) (Constraint, error) {
+	if CacheConstraints {
+		constraintCacheLock.RLock()
+		if final, exists := constraintCache[in]; exists {
+			constraintCacheLock.RUnlock()
+			return final.c, final.err
 		}
-		if c.con.Minor() != v.Minor() && !c.minorDirty {
-			return true
-		} else if c.minorDirty {
-			return false
+		constraintCacheLock.RUnlock()
+	}
+
+	// Rewrite - ranges into a comparison operation.
+	c := rewriteRange(in)
+
+	ors := strings.Split(c, "||")
+	or := make([]Constraint, len(ors))
+	for k, v := range ors {
+		cs := strings.Split(v, ",")
+		result := make([]Constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				if CacheConstraints {
+					constraintCacheLock.Lock()
+					constraintCache[in] = ccache{err: err}
+					constraintCacheLock.Unlock()
+				}
+				return nil, err
+			}
+
+			result[i] = pc
+		}
+		or[k] = Intersection(result...)
+	}
+
+	final := Union(or...)
+
+	if CacheConstraints {
+		constraintCacheLock.Lock()
+		constraintCache[in] = ccache{c: final}
+		constraintCacheLock.Unlock()
+	}
+
+	return final, nil
+}
+
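+// Parsed constraints and versions are memoized by their input string, so
+// repeated parses of the same text are cheap. Callers that churn through
+// many distinct strings can opt out, as the vendored benchmarks do:
+//
+//	CacheConstraints = false
+//	CacheVersions = false
+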
+// Intersection computes the intersection between N Constraints, returning as
+// compact a representation of the intersection as possible.
+//
+// No error is indicated if all the sets are collectively disjoint; you must inspect the
+// return value to see if the result is the empty set (by calling IsNone() on
+// it).
+func Intersection(cg ...Constraint) Constraint {
+	// If there's zero or one constraints in the group, we can quit fast
+	switch len(cg) {
+	case 0:
+		// Zero members, only sane thing to do is return none
+		return None()
+	case 1:
+		// Just one member means that's our final constraint
+		return cg[0]
+	}
+
+	car, cdr := cg[0], cg[1:]
+	for _, c := range cdr {
+		if IsNone(car) {
+			return None()
+		}
+		car = car.Intersect(c)
+	}
+
+	return car
+}
+
+// Union takes a variable number of constraints, and returns the most compact
+// possible representation of those constraints.
+//
+// This effectively ORs together all the provided constraints. If any of the
+// included constraints are the set of all versions (any), that supersedes
+// everything else.
+func Union(cg ...Constraint) Constraint {
+	// If there's zero or one constraints in the group, we can quit fast
+	switch len(cg) {
+	case 0:
+		// Zero members, only sane thing to do is return none
+		return None()
+	case 1:
+		// One member, so the result will just be that
+		return cg[0]
+	}
+
+	// Preliminary pass to look for 'any' in the current set (and bail out early
+	// if found), but also construct a []realConstraint for everything else
+	var real constraintList
+
+	for _, c := range cg {
+		switch tc := c.(type) {
+		case any:
+			return c
+		case none:
+			continue
+		case *Version:
+			//if tc != nil {
+			//heap.Push(&real, tc)
+			//}
+			real = append(real, tc)
+		case rangeConstraint:
+			//heap.Push(&real, tc)
+			real = append(real, tc)
+		case unionConstraint:
+			real = append(real, tc...)
+			//for _, c2 := range tc {
+			//heap.Push(&real, c2)
+			//}
+		default:
+			panic("unknown constraint type")
+		}
+	}
+	// TODO heap-based merging isn't working as expected, so fall back to
+	// sorting the whole list for now
+
+	// Sort both the versions and ranges into ascending order
+	sort.Sort(real)
+
+	// Iteratively merge the constraintList elements
+	var nuc unionConstraint
+	for _, c := range real {
+		if len(nuc) == 0 {
+			nuc = append(nuc, c)
+			continue
 		}
 
-		return false
+		last := nuc[len(nuc)-1]
+		switch lt := last.(type) {
+		case *Version:
+			switch ct := c.(type) {
+			case *Version:
+				// Two versions in a row; only append if they're not equal
+				if !lt.Equal(ct) {
+					nuc = append(nuc, ct)
+				}
+			case rangeConstraint:
+				// Last was version, current is range. constraintList sorts by
+				// min version, so it's guaranteed that the version will be less
+				// than the range's min, guaranteeing that these are disjoint.
+				//
+				// ...almost. If the min of the range is the same as the
+				// version, then a union should merge the two by making the
+				// range inclusive at the bottom.
+				if lt.Equal(ct.min) {
+					ct.includeMin = true
+					nuc[len(nuc)-1] = ct
+				} else {
+					nuc = append(nuc, c)
+				}
+			}
+		case rangeConstraint:
+			switch ct := c.(type) {
+			case *Version:
+				// Last was range, current is version. constraintList sort invariants guarantee
+				// that the version will be greater than the min, so we have to
+				// determine if the version is less than the max. If it is, we
+				// subsume it into the range with a Union call.
+				//
+				// Lazy version: just union them and let rangeConstraint figure
+				// it out, then switch on the result type.
+				c2 := lt.Union(ct)
+				if crc, ok := c2.(realConstraint); ok {
+					nuc[len(nuc)-1] = crc
+				} else {
+					// Otherwise, all it can be is a union constraint. First
+					// item in the union will be the same range, second will be the
+					// version, so append onto nuc from one back from the end
+					nuc = append(nuc[:len(nuc)-1], c2.(unionConstraint)...)
+				}
+			case rangeConstraint:
+				if lt.MatchesAny(ct) || areAdjacent(lt, ct) {
+					// If the previous range overlaps or is adjacent to the
+					// current range, we know they'll be able to merge together,
+					// so overwrite the last item in nuc with the result of that
+					// merge (which is what Union will produce)
+					nuc[len(nuc)-1] = lt.Union(ct).(realConstraint)
+				} else {
+					nuc = append(nuc, c)
+				}
+			}
+		}
 	}
 
-	return !v.Equal(c.con)
-}
-
-func constraintGreaterThan(v *Version, c *constraint) bool {
-	return v.Compare(c.con) == 1
-}
-
-func constraintLessThan(v *Version, c *constraint) bool {
-	if !c.dirty {
-		return v.Compare(c.con) < 0
+	if len(nuc) == 1 {
+		return nuc[0]
 	}
-
-	if v.Major() > c.con.Major() {
-		return false
-	} else if v.Minor() > c.con.Minor() && !c.minorDirty {
-		return false
-	}
-
-	return true
-}
-
-func constraintGreaterThanEqual(v *Version, c *constraint) bool {
-	return v.Compare(c.con) >= 0
-}
-
-func constraintLessThanEqual(v *Version, c *constraint) bool {
-	if !c.dirty {
-		return v.Compare(c.con) <= 0
-	}
-
-	if v.Major() > c.con.Major() {
-		return false
-	} else if v.Minor() > c.con.Minor() && !c.minorDirty {
-		return false
-	}
-
-	return true
-}
-
-// ~*, ~>* --> >= 0.0.0 (any)
-// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
-// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
-// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
-// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
-// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
-func constraintTilde(v *Version, c *constraint) bool {
-	if v.LessThan(c.con) {
-		return false
-	}
-
-	// ~0.0.0 is a special case where all constraints are accepted. It's
-	// equivalent to >= 0.0.0.
-	if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 {
-		return true
-	}
-
-	if v.Major() != c.con.Major() {
-		return false
-	}
-
-	if v.Minor() != c.con.Minor() && !c.minorDirty {
-		return false
-	}
-
-	return true
-}
-
-// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
-// it's a straight =
-func constraintTildeOrEqual(v *Version, c *constraint) bool {
-	if c.dirty {
-		c.msg = constraintMsg["~"]
-		return constraintTilde(v, c)
-	}
-
-	return v.Equal(c.con)
-}
-
-// ^* --> (any)
-// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0
-// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0
-// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0
-// ^1.2.3 --> >=1.2.3, <2.0.0
-// ^1.2.0 --> >=1.2.0, <2.0.0
-func constraintCaret(v *Version, c *constraint) bool {
-	if v.LessThan(c.con) {
-		return false
-	}
-
-	if v.Major() != c.con.Major() {
-		return false
-	}
-
-	return true
-}
-
-var constraintRangeRegex *regexp.Regexp
-
-const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
-	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
-	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
-
-func isX(x string) bool {
-	switch x {
-	case "x", "*", "X":
-		return true
-	default:
-		return false
-	}
-}
-
-func rewriteRange(i string) string {
-	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
-	if m == nil {
-		return i
-	}
-	o := i
-	for _, v := range m {
-		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
-		o = strings.Replace(o, v[0], t, 1)
-	}
-
-	return o
+	return nuc
 }
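
A quick illustration of the compaction performed here (a sketch; the expected
outputs follow the round-trip serialization exercised in the tests):

	package main

	import (
		"fmt"

		"github.com/Masterminds/semver"
	)

	func main() {
		a, _ := semver.NewConstraint("3 - 4")     // >=3.0.0, <=4.0.0
		b, _ := semver.NewConstraint(">=3.0, <4") // overlaps a
		fmt.Println(semver.Union(a, b)) // one merged range: >=3.0.0, <=4.0.0

		c, _ := semver.NewConstraint("<1.0.0") // disjoint from a
		fmt.Println(semver.IsNone(semver.Intersection(a, c))) // true
	}
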
diff --git a/vendor/github.com/Masterminds/semver/constraints_test.go b/vendor/github.com/Masterminds/semver/constraints_test.go
index 6dad455..6b09d73 100644
--- a/vendor/github.com/Masterminds/semver/constraints_test.go
+++ b/vendor/github.com/Masterminds/semver/constraints_test.go
@@ -1,27 +1,52 @@
 package semver
 
-import (
-	"reflect"
-	"testing"
-)
+import "testing"
 
 func TestParseConstraint(t *testing.T) {
 	tests := []struct {
 		in  string
-		f   cfunc
-		v   string
+		c   Constraint
 		err bool
 	}{
-		{">= 1.2", constraintGreaterThanEqual, "1.2.0", false},
-		{"1.0", constraintTildeOrEqual, "1.0.0", false},
-		{"foo", nil, "", true},
-		{"<= 1.2", constraintLessThanEqual, "1.2.0", false},
-		{"=< 1.2", constraintLessThanEqual, "1.2.0", false},
-		{"=> 1.2", constraintGreaterThanEqual, "1.2.0", false},
-		{"v1.2", constraintTildeOrEqual, "1.2.0", false},
-		{"=1.5", constraintTildeOrEqual, "1.5.0", false},
-		{"> 1.3", constraintGreaterThan, "1.3.0", false},
-		{"< 1.4.1", constraintLessThan, "1.4.1", false},
+		{"*", Any(), false},
+		{">= 1.2", rangeConstraint{
+			min:        newV(1, 2, 0),
+			includeMin: true,
+		}, false},
+		{"1.0", newV(1, 0, 0), false},
+		{"foo", nil, true},
+		{"<= 1.2", rangeConstraint{
+			max:        newV(1, 2, 0),
+			includeMax: true,
+		}, false},
+		{"=< 1.2", rangeConstraint{
+			max:        newV(1, 2, 0),
+			includeMax: true,
+		}, false},
+		{"=> 1.2", rangeConstraint{
+			min:        newV(1, 2, 0),
+			includeMin: true,
+		}, false},
+		{"v1.2", newV(1, 2, 0), false},
+		{"=1.5", newV(1, 5, 0), false},
+		{"> 1.3", rangeConstraint{
+			min: newV(1, 3, 0),
+		}, false},
+		{"< 1.4.1", rangeConstraint{
+			max: newV(1, 4, 1),
+		}, false},
+		{"~1.1.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(1, 2, 0),
+			includeMin: true,
+			includeMax: false,
+		}, false},
+		{"^1.1.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+			includeMax: false,
+		}, false},
 	}
 
 	for _, tc := range tests {
@@ -29,7 +54,7 @@
 		if tc.err && err == nil {
 			t.Errorf("Expected error for %s didn't occur", tc.in)
 		} else if !tc.err && err != nil {
-			t.Errorf("Unexpected error for %s", tc.in)
+			t.Errorf("Unexpected error %q for %s", err, tc.in)
 		}
 
 		// If an error was expected continue the loop and don't try the other
@@ -38,15 +63,84 @@
 			continue
 		}
 
-		if tc.v != c.con.String() {
+		if !constraintEq(tc.c, c) {
 			t.Errorf("Incorrect version found on %s", tc.in)
 		}
+	}
+}
 
-		f1 := reflect.ValueOf(tc.f)
-		f2 := reflect.ValueOf(c.function)
-		if f1 != f2 {
-			t.Errorf("Wrong constraint found for %s", tc.in)
+func constraintEq(c1, c2 Constraint) bool {
+	switch tc1 := c1.(type) {
+	case any:
+		if _, ok := c2.(any); !ok {
+			return false
 		}
+		return true
+	case none:
+		if _, ok := c2.(none); !ok {
+			return false
+		}
+		return true
+	case *Version:
+		if tc2, ok := c2.(*Version); ok {
+			return tc1.Equal(tc2)
+		}
+		return false
+	case rangeConstraint:
+		if tc2, ok := c2.(rangeConstraint); ok {
+			if len(tc1.excl) != len(tc2.excl) {
+				return false
+			}
+
+			if tc1.min != nil {
+				if !(tc1.includeMin == tc2.includeMin && tc1.min.Equal(tc2.min)) {
+					return false
+				}
+			} else if tc2.min != nil {
+				return false
+			}
+
+			if tc1.max != nil {
+				if !(tc1.includeMax == tc2.includeMax && tc1.max.Equal(tc2.max)) {
+					return false
+				}
+			} else if tc2.max != nil {
+				return false
+			}
+
+			for k, e := range tc1.excl {
+				if !e.Equal(tc2.excl[k]) {
+					return false
+				}
+			}
+			return true
+		}
+		return false
+	case unionConstraint:
+		if tc2, ok := c2.(unionConstraint); ok {
+			if len(tc1) != len(tc2) {
+				return false
+			}
+
+			for k, c := range tc1 {
+				if !constraintEq(c, tc2[k]) {
+					return false
+				}
+			}
+			return true
+		}
+		return false
+	}
+
+	panic("unknown type")
+}
+
+// newV is a helper to create a new Version object.
+func newV(major, minor, patch int64) *Version {
+	return &Version{
+		major: major,
+		minor: minor,
+		patch: patch,
 	}
 }
 
@@ -72,9 +166,28 @@
 		{"<=1.1", "0.1.0", true},
 		{"<=1.1", "1.1.0", true},
 		{"<=1.1", "1.1.1", false},
+		{"<=1.1-alpha1", "1.1", false},
+		{"<=2.x", "3.0.0", false},
+		{"<=2.x", "2.9.9", true},
+		{"<2.x", "2.0.0", false},
+		{"<2.x", "1.9.9", true},
+		{">=2.x", "3.0.0", true},
+		{">=2.x", "2.9.9", true},
+		{">=2.x", "1.9.9", false},
+		{">2.x", "3.0.0", true},
+		{">2.x", "2.9.9", false},
+		{">2.x", "1.9.9", false},
+		// TODO these are all pending the changes in #10
+		//{"<=2.x-beta1", "3.0.0-alpha2", false},
+		//{">2.x-beta1", "3.0.0-alpha2", true},
+		//{"<2.0.0", "2.0.0-alpha1", false},
+		//{"<=2.0.0", "2.0.0-alpha1", true},
 	}
 
 	for _, tc := range tests {
+		if testing.Verbose() {
+			t.Logf("Testing if %q allows %q", tc.constraint, tc.version)
+		}
 		c, err := parseConstraint(tc.constraint)
 		if err != nil {
 			t.Errorf("err: %s", err)
@@ -87,9 +200,13 @@
 			continue
 		}
 
-		a := c.check(v)
+		a := c.Matches(v) == nil
 		if a != tc.check {
-			t.Errorf("Constraint '%s' failing", tc.constraint)
+			if tc.check {
+				t.Errorf("%q should have matched %q", tc.constraint, tc.version)
+			} else {
+				t.Errorf("%q should not have matched %q", tc.constraint, tc.version)
+			}
 		}
 	}
 }
@@ -97,22 +214,74 @@
 func TestNewConstraint(t *testing.T) {
 	tests := []struct {
 		input string
-		ors   int
-		count int
+		c     Constraint
 		err   bool
 	}{
-		{">= 1.1", 1, 1, false},
-		{"2.0", 1, 1, false},
-		{">= bar", 0, 0, true},
-		{">= 1.2.3, < 2.0", 1, 2, false},
-		{">= 1.2.3, < 2.0 || => 3.0, < 4", 2, 2, false},
-
-		// The 3-4 should be broken into 2 by the range rewriting
-		{"3-4 || => 3.0, < 4", 2, 2, false},
+		{">= 1.1", rangeConstraint{
+			min:        newV(1, 1, 0),
+			includeMin: true,
+		}, false},
+		{"2.0", newV(2, 0, 0), false},
+		{">= bar", nil, true},
+		{"^1.1.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+		}, false},
+		{">= 1.2.3, < 2.0 || => 3.0, < 4", unionConstraint{
+			rangeConstraint{
+				min:        newV(1, 2, 3),
+				max:        newV(2, 0, 0),
+				includeMin: true,
+			},
+			rangeConstraint{
+				min:        newV(3, 0, 0),
+				max:        newV(4, 0, 0),
+				includeMin: true,
+			},
+		}, false},
+		{"3-4 || => 1.0, < 2", Union(
+			rangeConstraint{
+				min:        newV(3, 0, 0),
+				max:        newV(4, 0, 0),
+				includeMin: true,
+				includeMax: true,
+			},
+			rangeConstraint{
+				min:        newV(1, 0, 0),
+				max:        newV(2, 0, 0),
+				includeMin: true,
+			},
+		), false},
+		// demonstrates union compression
+		{"3-4 || => 3.0, < 4", rangeConstraint{
+			min:        newV(3, 0, 0),
+			max:        newV(4, 0, 0),
+			includeMin: true,
+			includeMax: true,
+		}, false},
+		{">=1.1.0, <2.0.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+			includeMax: false,
+		}, false},
+		{"!=1.4.0", rangeConstraint{
+			excl: []*Version{
+				newV(1, 4, 0),
+			},
+		}, false},
+		{">=1.1.0, !=1.4.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			includeMin: true,
+			excl: []*Version{
+				newV(1, 4, 0),
+			},
+		}, false},
 	}
 
 	for _, tc := range tests {
-		v, err := NewConstraint(tc.input)
+		c, err := NewConstraint(tc.input)
 		if tc.err && err == nil {
 			t.Errorf("expected but did not get error for: %s", tc.input)
 			continue
@@ -124,16 +293,8 @@
 			continue
 		}
 
-		l := len(v.constraints)
-		if tc.ors != l {
-			t.Errorf("Expected %s to have %d ORs but got %d",
-				tc.input, tc.ors, l)
-		}
-
-		l = len(v.constraints[0])
-		if tc.count != l {
-			t.Errorf("Expected %s to have %d constraints but got %d",
-				tc.input, tc.count, l)
+		if !constraintEq(tc.c, c) {
+			t.Errorf("%q produced constraint %q, but expected %q", tc.input, c, tc.c)
 		}
 	}
 }
@@ -145,7 +306,9 @@
 		check      bool
 	}{
 		{"*", "1.2.3", true},
-		{"~0.0.0", "1.2.3", true},
+		{"~0.0.0", "1.2.3", false}, // npm allows this weird thing, but we don't
+		{"~0.0.0", "0.1.9", false},
+		{"~0.0.0", "0.0.9", true},
 		{"= 2.0", "1.2.3", false},
 		{"= 2.0", "2.0.0", true},
 		{"4.1", "4.1.0", true},
@@ -162,10 +325,12 @@
 		{"<1.1", "0.1.0", true},
 		{"<1.1", "1.1.0", false},
 		{"<1.1", "1.1.1", false},
-		{"<1.x", "1.1.1", true},
+		{"<1.x", "1.1.1", false},
+		{"<1.x", "0.9.1", true},
 		{"<1.x", "2.1.1", false},
 		{"<1.1.x", "1.2.1", false},
-		{"<1.1.x", "1.1.500", true},
+		{"<1.1.x", "1.1.500", false},
+		{"<1.1.x", "1.0.500", true},
 		{"<1.2.x", "1.1.1", true},
 		{">=1.1", "4.1.0", true},
 		{">=1.1", "1.1.0", true},
@@ -215,9 +380,52 @@
 			continue
 		}
 
-		a := c.Check(v)
+		a := c.Matches(v) == nil
 		if a != tc.check {
-			t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version)
+			if a {
+				t.Errorf("Input %q produced constraint %q; should not have admitted %q, but did", tc.constraint, c, tc.version)
+			} else {
+				t.Errorf("Input %q produced constraint %q; should have admitted %q, but did not", tc.constraint, c, tc.version)
+			}
+		}
+	}
+}
+
+func TestBidirectionalSerialization(t *testing.T) {
+	tests := []struct {
+		io string
+		eq bool
+	}{
+		{"*", true},         // any
+		{"~0.0.0", false},   // tildes expand into ranges
+		{"^2.0", false},     // carets expand into ranges
+		{"=2.0", false},     // abbreviated versions print as full
+		{"4.1.x", false},    // wildcards expand into ranges
+		{">= 1.1.0", false}, // does not produce spaces on ranges
+		{"4.1.0", true},
+		{"!=4.1.0", true},
+		{">=1.1.0", true},
+		{">=1.1.0, <2.0.0", true},
+		{">1.0.0, <=1.1.0", true},
+		{"<=1.1.0", true},
+		{">=1.1.0, <2.0.0, !=1.2.3", true},
+		{">=1.1.0, <2.0.0, !=1.2.3 || >3.0.0", true},
+		{">=1.1.0, <2.0.0, !=1.2.3 || >=3.0.0", true},
+	}
+
+	for _, fix := range tests {
+		c, err := NewConstraint(fix.io)
+		if err != nil {
+			t.Errorf("Valid constraint string produced unexpected error: %s", err)
+		}
+
+		eq := fix.io == c.String()
+		if eq != fix.eq {
+			if eq {
+				t.Errorf("Constraint %q should not have reproduced input string %q, but did", c, fix.io)
+			} else {
+				t.Errorf("Constraint should have reproduced input string %q, but instead produced %q", fix.io, c)
+			}
 		}
 	}
 }
@@ -261,168 +469,119 @@
 	}
 }
 
-func TestConstraintsValidate(t *testing.T) {
-	tests := []struct {
-		constraint string
-		version    string
-		check      bool
-	}{
-		{"*", "1.2.3", true},
-		{"~0.0.0", "1.2.3", true},
-		{"= 2.0", "1.2.3", false},
-		{"= 2.0", "2.0.0", true},
-		{"4.1", "4.1.0", true},
-		{"4.1.x", "4.1.3", true},
-		{"1.x", "1.4", true},
-		{"!=4.1", "4.1.0", false},
-		{"!=4.1", "5.1.0", true},
-		{"!=4.x", "5.1.0", true},
-		{"!=4.x", "4.1.0", false},
-		{"!=4.1.x", "4.2.0", true},
-		{"!=4.2.x", "4.2.3", false},
-		{">1.1", "4.1.0", true},
-		{">1.1", "1.1.0", false},
-		{"<1.1", "0.1.0", true},
-		{"<1.1", "1.1.0", false},
-		{"<1.1", "1.1.1", false},
-		{"<1.x", "1.1.1", true},
-		{"<1.x", "2.1.1", false},
-		{"<1.1.x", "1.2.1", false},
-		{"<1.1.x", "1.1.500", true},
-		{"<1.2.x", "1.1.1", true},
-		{">=1.1", "4.1.0", true},
-		{">=1.1", "1.1.0", true},
-		{">=1.1", "0.0.9", false},
-		{"<=1.1", "0.1.0", true},
-		{"<=1.1", "1.1.0", true},
-		{"<=1.x", "1.1.0", true},
-		{"<=2.x", "3.1.0", false},
-		{"<=1.1", "1.1.1", false},
-		{"<=1.1.x", "1.2.500", false},
-		{">1.1, <2", "1.1.1", true},
-		{">1.1, <3", "4.3.2", false},
-		{">=1.1, <2, !=1.2.3", "1.2.3", false},
-		{">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true},
-		{">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true},
-		{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", false},
-		{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false},
-		{"1.1 - 2", "1.1.1", true},
-		{"1.1-3", "4.3.2", false},
-		{"^1.1", "1.1.1", true},
-		{"^1.1", "4.3.2", false},
-		{"^1.x", "1.1.1", true},
-		{"^2.x", "1.1.1", false},
-		{"^1.x", "2.1.1", false},
-		{"~*", "2.1.1", true},
-		{"~1.x", "2.1.1", false},
-		{"~1.x", "1.3.5", true},
-		{"~1.x", "1.4", true},
-		{"~1.1", "1.1.1", true},
-		{"~1.2.3", "1.2.5", true},
-		{"~1.2.3", "1.2.2", false},
-		{"~1.2.3", "1.3.2", false},
-		{"~1.1", "1.2.3", false},
-		{"~1.3", "2.4.5", false},
+func TestUnionErr(t *testing.T) {
+	u1 := Union(
+		rangeConstraint{
+			min:        newV(3, 0, 0),
+			max:        newV(4, 0, 0),
+			includeMin: true,
+			includeMax: true,
+		},
+		rangeConstraint{
+			min:        newV(1, 0, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+		},
+	)
+	fail := u1.Matches(newV(2, 5, 0))
+	failstr := `2.5.0 is greater than or equal to the maximum of >=1.0.0, <2.0.0
+2.5.0 is less than the minimum of >=3.0.0, <=4.0.0`
+	if fail.Error() != failstr {
+		t.Errorf("Did not get expected failure message from union, got %q", fail)
+	}
+}
+
+func TestIsSuperset(t *testing.T) {
+	rc := []rangeConstraint{
+		rangeConstraint{
+			min:        newV(1, 2, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+		},
+		rangeConstraint{
+			min: newV(1, 2, 0),
+			max: newV(2, 1, 0),
+		},
+		rangeConstraint{
+			max: newV(1, 10, 0),
+		},
+		rangeConstraint{
+			min: newV(2, 0, 0),
+		},
+		rangeConstraint{
+			min:        newV(1, 2, 0),
+			max:        newV(2, 0, 0),
+			includeMax: true,
+		},
 	}
 
-	for _, tc := range tests {
-		c, err := NewConstraint(tc.constraint)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
+	for _, c := range rc {
+
+		// Superset comparison is not strict, so a range should always be a superset
+		// of itself.
+		if !c.isSupersetOf(c) {
+			t.Errorf("Ranges should be supersets of themselves; %s indicated it was not", c)
 		}
+	}
 
-		v, err := NewVersion(tc.version)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
+	pairs := []struct{ l, r rangeConstraint }{
+		{
+			// ensures lte is handled correctly (min side)
+			l: rc[0],
+			r: rc[1],
+		},
+		{
+			// ensures nil on min side works well
+			l: rc[0],
+			r: rc[2],
+		},
+		{
+			// ensures nil on max side works well
+			l: rc[0],
+			r: rc[3],
+		},
+		{
+			// ensures nils on both sides work well
+			l: rc[2],
+			r: rc[3],
+		},
+		{
+			// ensures gte is handled correctly (max side)
+			l: rc[2],
+			r: rc[4],
+		},
+	}
+
+	for _, p := range pairs {
+		if p.l.isSupersetOf(p.r) {
+			t.Errorf("%s is not a superset of %s", p.l, p.r)
 		}
-
-		a, msgs := c.Validate(v)
-		if a != tc.check {
-			t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version)
-		} else if a == false && len(msgs) == 0 {
-			t.Errorf("%q failed with %q but no errors returned", tc.constraint, tc.version)
+		if p.r.isSupersetOf(p.l) {
+			t.Errorf("%s is not a superset of %s", p.r, p.l)
 		}
-
-		// if a == false {
-		// 	for _, m := range msgs {
-		// 		t.Errorf("%s", m)
-		// 	}
-		// }
 	}
 
-	v, err := NewVersion("1.2.3")
-	if err != nil {
-		t.Errorf("err: %s", err)
+	rc[1].max.minor = 0
+
+	if !rc[0].isSupersetOf(rc[1]) {
+		t.Errorf("%s is a superset of %s", rc[0], rc[1])
+	}
+	rc[1].includeMax = true
+	if rc[1].isSupersetOf(rc[0]) {
+		t.Errorf("%s is not a superset of %s", rc[1], rc[0])
+	}
+	rc[0].includeMin = false
+	if !rc[1].isSupersetOf(rc[0]) {
+		t.Errorf("%s is a superset of %s", rc[1], rc[0])
 	}
 
-	c, err := NewConstraint("!= 1.2.5, ^2, <= 1.1.x")
-	if err != nil {
-		t.Errorf("err: %s", err)
+	// isSupersetOf ignores excludes, so even though this would make rc[1] not a
+	// superset of rc[0] anymore, it should still say it is.
+	rc[1].excl = []*Version{
+		newV(1, 5, 0),
 	}
 
-	_, msgs := c.Validate(v)
-	if len(msgs) != 2 {
-		t.Error("Invalid number of validations found")
-	}
-	e := msgs[0].Error()
-	if e != "1.2.3 does not have same major version as 2" {
-		t.Error("Did not get expected message: 1.2.3 does not have same major version as 2")
-	}
-	e = msgs[1].Error()
-	if e != "1.2.3 is greater than 1.1.x" {
-		t.Error("Did not get expected message: 1.2.3 is greater than 1.1.x")
-	}
-
-	tests2 := []struct {
-		constraint, version, msg string
-	}{
-		{"= 2.0", "1.2.3", "1.2.3 is not equal to 2.0"},
-		{"!=4.1", "4.1.0", "4.1.0 is equal to 4.1"},
-		{"!=4.x", "4.1.0", "4.1.0 is equal to 4.x"},
-		{"!=4.2.x", "4.2.3", "4.2.3 is equal to 4.2.x"},
-		{">1.1", "1.1.0", "1.1.0 is less than or equal to 1.1"},
-		{"<1.1", "1.1.0", "1.1.0 is greater than or equal to 1.1"},
-		{"<1.1", "1.1.1", "1.1.1 is greater than or equal to 1.1"},
-		{"<1.x", "2.1.1", "2.1.1 is greater than or equal to 1.x"},
-		{"<1.1.x", "1.2.1", "1.2.1 is greater than or equal to 1.1.x"},
-		{">=1.1", "0.0.9", "0.0.9 is less than 1.1"},
-		{"<=2.x", "3.1.0", "3.1.0 is greater than 2.x"},
-		{"<=1.1", "1.1.1", "1.1.1 is greater than 1.1"},
-		{"<=1.1.x", "1.2.500", "1.2.500 is greater than 1.1.x"},
-		{">1.1, <3", "4.3.2", "4.3.2 is greater than or equal to 3"},
-		{">=1.1, <2, !=1.2.3", "1.2.3", "1.2.3 is equal to 1.2.3"},
-		{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", "3.0.0 is greater than or equal to 2"},
-		{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", "1.2.3 is equal to 1.2.3"},
-		{"1.1-3", "4.3.2", "4.3.2 is greater than 3"},
-		{"^1.1", "4.3.2", "4.3.2 does not have same major version as 1.1"},
-		{"^2.x", "1.1.1", "1.1.1 does not have same major version as 2.x"},
-		{"^1.x", "2.1.1", "2.1.1 does not have same major version as 1.x"},
-		{"~1.x", "2.1.1", "2.1.1 does not have same major and minor version as 1.x"},
-		{"~1.2.3", "1.2.2", "1.2.2 does not have same major and minor version as 1.2.3"},
-		{"~1.2.3", "1.3.2", "1.3.2 does not have same major and minor version as 1.2.3"},
-		{"~1.1", "1.2.3", "1.2.3 does not have same major and minor version as 1.1"},
-		{"~1.3", "2.4.5", "2.4.5 does not have same major and minor version as 1.3"},
-	}
-
-	for _, tc := range tests2 {
-		c, err := NewConstraint(tc.constraint)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
-		}
-
-		v, err := NewVersion(tc.version)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
-		}
-
-		_, msgs := c.Validate(v)
-		e := msgs[0].Error()
-		if e != tc.msg {
-			t.Errorf("Did not get expected message %q: %s", tc.msg, e)
-		}
+	if !rc[1].isSupersetOf(rc[0]) {
+		t.Errorf("%s is still a superset of %s, because isSupersetOf is supposed to ignore excluded versions", rc[1], rc[0])
 	}
 }
diff --git a/vendor/github.com/Masterminds/semver/error.go b/vendor/github.com/Masterminds/semver/error.go
new file mode 100644
index 0000000..4fb7345
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/error.go
@@ -0,0 +1,69 @@
+package semver
+
+import (
+	"bytes"
+	"fmt"
+)
+
+var rangeErrs = [...]string{
+	"%s is less than the minimum of %s",
+	"%s is less than or equal to the minimum of %s",
+	"%s is greater than the maximum of %s",
+	"%s is greater than or equal to the maximum of %s",
+	"%s is specifically disallowed in %s",
+}
+
+const (
+	rerrLT = iota
+	rerrLTE
+	rerrGT
+	rerrGTE
+	rerrNE
+)
+
+type MatchFailure interface {
+	error
+	Pair() (v *Version, c Constraint)
+}
+
+type RangeMatchFailure struct {
+	v   *Version
+	rc  rangeConstraint
+	typ int8
+}
+
+func (rce RangeMatchFailure) Error() string {
+	return fmt.Sprintf(rangeErrs[rce.typ], rce.v, rce.rc)
+}
+
+func (rce RangeMatchFailure) Pair() (v *Version, r Constraint) {
+	return rce.v, rce.rc
+}
+
+type VersionMatchFailure struct {
+	v, other *Version
+}
+
+func (vce VersionMatchFailure) Error() string {
+	return fmt.Sprintf("%s is not equal to %s", vce.v, vce.other)
+}
+
+func (vce VersionMatchFailure) Pair() (v *Version, r Constraint) {
+	return vce.v, vce.other
+}
+
+type MultiMatchFailure []MatchFailure
+
+func (mmf MultiMatchFailure) Error() string {
+	var buf bytes.Buffer
+
+	for k, e := range mmf {
+		if k < len(mmf)-1 {
+			fmt.Fprintf(&buf, "%s\n", e)
+		} else {
+			fmt.Fprint(&buf, e.Error())
+		}
+	}
+
+	return buf.String()
+}
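
A sketch of how these failure types surface through Matches; the version and
constraint here are arbitrary:

	package main

	import (
		"fmt"

		"github.com/Masterminds/semver"
	)

	func main() {
		c, _ := semver.NewConstraint(">=1.1.0, <2.0.0")
		v, _ := semver.NewVersion("2.5.0")

		if err := c.Matches(v); err != nil {
			// Range failures implement MatchFailure, exposing the
			// clashing pair in addition to the rendered message.
			if mf, ok := err.(semver.MatchFailure); ok {
				fv, fc := mf.Pair()
				fmt.Printf("%s rejected by %s: %s\n", fv, fc, mf)
			}
		}
	}
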
diff --git a/vendor/github.com/Masterminds/semver/magic.go b/vendor/github.com/Masterminds/semver/magic.go
new file mode 100644
index 0000000..9a8d353
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/magic.go
@@ -0,0 +1,99 @@
+package semver
+
+import "errors"
+
+var noneErr = errors.New("The 'None' constraint admits no versions.")
+
+// Any is a constraint that is satisfied by any valid semantic version.
+type any struct{}
+
+// Any creates a constraint that will match any version.
+func Any() Constraint {
+	return any{}
+}
+
+func (any) String() string {
+	return "*"
+}
+
+// Matches checks that a version satisfies the constraint. As all versions
+// satisfy Any, this always returns nil.
+func (any) Matches(v *Version) error {
+	return nil
+}
+
+// Intersect computes the intersection between two constraints.
+//
+// As Any is the set of all possible versions, any intersection with that
+// infinite set will necessarily be the entirety of the second set. Thus, this
+// simply returns the passed constraint.
+func (any) Intersect(c Constraint) Constraint {
+	return c
+}
+
+// MatchesAny indicates whether there exists any version that can satisfy both
+// this constraint and the passed constraint. As all versions satisfy Any,
+// this is always true, unless none is passed.
+func (any) MatchesAny(c Constraint) bool {
+	if _, ok := c.(none); ok {
+		return false
+	}
+	return true
+}
+
+func (any) Union(c Constraint) Constraint {
+	return Any()
+}
+
+func (any) _private() {}
+
+// None is an unsatisfiable constraint - it represents the empty set.
+type none struct{}
+
+// None creates a constraint that matches no versions (the empty set).
+func None() Constraint {
+	return none{}
+}
+
+func (none) String() string {
+	return ""
+}
+
+// Matches checks that a version satisfies the constraint. As no version can
+// satisfy None, this always fails (returns an error).
+func (none) Matches(v *Version) error {
+	return noneErr
+}
+
+// Intersect computes the intersection between two constraints.
+//
+// None is the empty set of versions, and any intersection with the empty set is
+// necessarily the empty set. Thus, this always returns None.
+func (none) Intersect(Constraint) Constraint {
+	return None()
+}
+
+func (none) Union(c Constraint) Constraint {
+	return c
+}
+
+// MatchesAny indicates whether there exists any version that can satisfy the
+// constraint. As no versions satisfy None, this is always false.
+func (none) MatchesAny(c Constraint) bool {
+	return false
+}
+
+func (none) _private() {}
+
+// IsNone indicates if a constraint will match no versions - that is, the
+// constraint represents the empty set.
+func IsNone(c Constraint) bool {
+	_, ok := c.(none)
+	return ok
+}
+
+// IsAny indicates if a constraint will match any and all versions.
+func IsAny(c Constraint) bool {
+	_, ok := c.(any)
+	return ok
+}
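Any and None act as the identity elements of the constraint set algebra. A short sketch of the identities the methods above guarantee, using only functions shown in this file:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	v, _ := semver.NewVersion("1.2.3")

	fmt.Println(semver.Any().Matches(v)) // <nil>: every version satisfies Any

	// Identity behavior: intersecting with None always yields None, and
	// unioning None with anything returns the other operand.
	fmt.Println(semver.IsNone(semver.Any().Intersect(semver.None()))) // true
	fmt.Println(semver.IsAny(semver.None().Union(semver.Any())))      // true
}
```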
diff --git a/vendor/github.com/Masterminds/semver/parse.go b/vendor/github.com/Masterminds/semver/parse.go
new file mode 100644
index 0000000..a6e6a97
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/parse.go
@@ -0,0 +1,217 @@
+package semver
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
+
+func parseConstraint(c string) (Constraint, error) {
+	m := constraintRegex.FindStringSubmatch(c)
+	if m == nil {
+		return nil, fmt.Errorf("Malformed constraint: %s", c)
+	}
+
+	// Handle the full wildcard case first - easy!
+	if isX(m[3]) {
+		return any{}, nil
+	}
+
+	ver := m[2]
+	var wildPatch, wildMinor bool
+	if isX(strings.TrimPrefix(m[4], ".")) {
+		wildPatch = true
+		wildMinor = true
+		ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+	} else if isX(strings.TrimPrefix(m[5], ".")) {
+		wildPatch = true
+		ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+	}
+
+	v, err := NewVersion(ver)
+	if err != nil {
+		// The constraintRegex should catch any regex parsing errors. So,
+		// we should never get here.
+		return nil, errors.New("constraint parser error")
+	}
+
+	switch m[1] {
+	case "^":
+		// Caret always expands to a range
+		return expandCaret(v), nil
+	case "~":
+		// Tilde always expands to a range
+		return expandTilde(v, wildMinor), nil
+	case "!=":
+		// Not equals expands to a range if no element isX(); otherwise expands
+		// to a union of ranges
+		return expandNeq(v, wildMinor, wildPatch), nil
+	case "", "=":
+		if wildPatch || wildMinor {
+			// Equalling a wildcard has the same behavior as expanding tilde
+			return expandTilde(v, wildMinor), nil
+		}
+		return v, nil
+	case ">":
+		return expandGreater(v, wildMinor, wildPatch, false), nil
+	case ">=", "=>":
+		return expandGreater(v, wildMinor, wildPatch, true), nil
+	case "<":
+		return expandLess(v, wildMinor, wildPatch, false), nil
+	case "<=", "=<":
+		return expandLess(v, wildMinor, wildPatch, true), nil
+	default:
+		// Shouldn't be possible to get here, unless the regex is allowing
+		// predicate we don't know about...
+		return nil, fmt.Errorf("Unrecognized predicate %q", m[1])
+	}
+}
+
+func expandCaret(v *Version) Constraint {
+	maxv := &Version{
+		major: v.major + 1,
+		minor: 0,
+		patch: 0,
+	}
+
+	return rangeConstraint{
+		min:        v,
+		max:        maxv,
+		includeMin: true,
+		includeMax: false,
+	}
+}
+
+func expandTilde(v *Version, wildMinor bool) Constraint {
+	if wildMinor {
+		// When minor is wild on a tilde, behavior is same as caret
+		return expandCaret(v)
+	}
+
+	maxv := &Version{
+		major: v.major,
+		minor: v.minor + 1,
+		patch: 0,
+	}
+
+	return rangeConstraint{
+		min:        v,
+		max:        maxv,
+		includeMin: true,
+		includeMax: false,
+	}
+}
+
+// expandNeq expands a "not-equals" constraint.
+//
+// If the constraint has any wildcards, it will expand into a unionConstraint
+// (which is how we represent a disjoint set). If there are no wildcards, it
+// will expand to a rangeConstraint with no min or max, but having the one
+// exception.
+func expandNeq(v *Version, wildMinor, wildPatch bool) Constraint {
+	if !(wildMinor || wildPatch) {
+		return rangeConstraint{
+			excl: []*Version{v},
+		}
+	}
+
+	// Create the low range with no min, and the max as the floor admitted by
+	// the wildcard
+	lr := rangeConstraint{
+		max:        v,
+		includeMax: false,
+	}
+
+	// The high range uses the derived version (bumped depending on where the
+	// wildcards were) as the min, and is inclusive
+	minv := &Version{
+		major: v.major,
+		minor: v.minor,
+		patch: v.patch,
+	}
+
+	if wildMinor {
+		minv.major++
+	} else {
+		minv.minor++
+	}
+
+	hr := rangeConstraint{
+		min:        minv,
+		includeMin: true,
+	}
+
+	return Union(lr, hr)
+}
+
+func expandGreater(v *Version, wildMinor, wildPatch, eq bool) Constraint {
+	if (wildMinor || wildPatch) && !eq {
+		// wildcards negate the meaning of prerelease and other info
+		v = &Version{
+			major: v.major,
+			minor: v.minor,
+			patch: v.patch,
+		}
+
+		// Strictly greater-than with wildcards is the weird case - we have to
+		// bump up to the next version AND include it (e.g. >1.x means >=2.0.0)
+		if wildMinor {
+			v.major++
+		} else {
+			v.minor++
+		}
+		return rangeConstraint{
+			min:        v,
+			includeMin: true,
+		}
+	}
+
+	return rangeConstraint{
+		min:        v,
+		includeMin: eq,
+	}
+}
+
+func expandLess(v *Version, wildMinor, wildPatch, eq bool) Constraint {
+	if eq && (wildMinor || wildPatch) {
+		// wildcards negate the meaning of prerelease and other info
+		v = &Version{
+			major: v.major,
+			minor: v.minor,
+			patch: v.patch,
+		}
+		if wildMinor {
+			v.major++
+		} else if wildPatch {
+			v.minor++
+		}
+		return rangeConstraint{
+			max:        v,
+			includeMax: false,
+		}
+	}
+
+	return rangeConstraint{
+		max:        v,
+		includeMax: eq,
+	}
+}
+
+func isX(x string) bool {
+	l := strings.ToLower(x)
+	return l == "x" || l == "*"
+}
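A sketch of how the shorthand operators expand into ranges, per expandCaret, expandTilde, and expandNeq above. NewConstraint is the assumed public entry point, and the printed forms follow rangeConstraint.String in range.go below:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	c, _ := semver.NewConstraint("^1.2.3") // caret bumps the major
	fmt.Println(c)                         // ">=1.2.3, <2.0.0"

	c, _ = semver.NewConstraint("~1.2.3") // tilde bumps the minor
	fmt.Println(c)                        // ">=1.2.3, <1.3.0"

	c, _ = semver.NewConstraint("!=1.2.3") // an unbounded range with one exclusion
	fmt.Println(c)                         // "!=1.2.3"
}
```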
diff --git a/vendor/github.com/Masterminds/semver/range.go b/vendor/github.com/Masterminds/semver/range.go
new file mode 100644
index 0000000..0e0c6a8
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/range.go
@@ -0,0 +1,452 @@
+package semver
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type rangeConstraint struct {
+	min, max               *Version
+	includeMin, includeMax bool
+	excl                   []*Version
+}
+
+func (rc rangeConstraint) Matches(v *Version) error {
+	var fail bool
+
+	rce := RangeMatchFailure{
+		v:  v,
+		rc: rc,
+	}
+
+	if rc.min != nil {
+		// TODO ensure sane handling of prerelease versions (which are strictly
+		// less than the normal version, but should be admitted in a geq range)
+		cmp := rc.min.Compare(v)
+		if rc.includeMin {
+			rce.typ = rerrLT
+			fail = cmp == 1
+		} else {
+			rce.typ = rerrLTE
+			fail = cmp != -1
+		}
+
+		if fail {
+			return rce
+		}
+	}
+
+	if rc.max != nil {
+		// TODO ensure sane handling of prerelease versions (which are strictly
+		// less than the normal version, but should be admitted in a geq range)
+		cmp := rc.max.Compare(v)
+		if rc.includeMax {
+			rce.typ = rerrGT
+			fail = cmp == -1
+		} else {
+			rce.typ = rerrGTE
+			fail = cmp != 1
+		}
+
+		if fail {
+			return rce
+		}
+	}
+
+	for _, excl := range rc.excl {
+		if excl.Equal(v) {
+			rce.typ = rerrNE
+			return rce
+		}
+	}
+
+	return nil
+}
+
+func (rc rangeConstraint) dup() rangeConstraint {
+	// Only need to do anything if there are some excludes
+	if len(rc.excl) == 0 {
+		return rc
+	}
+
+	var excl []*Version
+	excl = make([]*Version, len(rc.excl))
+	copy(excl, rc.excl)
+
+	return rangeConstraint{
+		min:        rc.min,
+		max:        rc.max,
+		includeMin: rc.includeMin,
+		includeMax: rc.includeMax,
+		excl:       excl,
+	}
+}
+
+func (rc rangeConstraint) Intersect(c Constraint) Constraint {
+	switch oc := c.(type) {
+	case any:
+		return rc
+	case none:
+		return None()
+	case unionConstraint:
+		return oc.Intersect(rc)
+	case *Version:
+		if err := rc.Matches(oc); err != nil {
+			return None()
+		} else {
+			return c
+		}
+	case rangeConstraint:
+		nr := rangeConstraint{
+			min:        rc.min,
+			max:        rc.max,
+			includeMin: rc.includeMin,
+			includeMax: rc.includeMax,
+		}
+
+		if oc.min != nil {
+			if nr.min == nil || nr.min.LessThan(oc.min) {
+				nr.min = oc.min
+				nr.includeMin = oc.includeMin
+			} else if oc.min.Equal(nr.min) && !oc.includeMin {
+				// intersection means we must follow the least inclusive
+				nr.includeMin = false
+			}
+		}
+
+		if oc.max != nil {
+			if nr.max == nil || nr.max.GreaterThan(oc.max) {
+				nr.max = oc.max
+				nr.includeMax = oc.includeMax
+			} else if oc.max.Equal(nr.max) && !oc.includeMax {
+				// intersection means we must follow the least inclusive
+				nr.includeMax = false
+			}
+		}
+
+		// Ensure any applicable excls from oc are included in nc
+		for _, e := range append(rc.excl, oc.excl...) {
+			if nr.Matches(e) == nil {
+				nr.excl = append(nr.excl, e)
+			}
+		}
+
+		if nr.min == nil || nr.max == nil {
+			return nr
+		}
+
+		if nr.min.Equal(nr.max) {
+			// min and max are equal. if range is inclusive, return that
+			// version; otherwise, none
+			if nr.includeMin && nr.includeMax {
+				return nr.min
+			}
+			return None()
+		}
+
+		if nr.min.GreaterThan(nr.max) {
+			// min is greater than max - not possible, so we return none
+			return None()
+		}
+
+		// range now fully validated, return what we have
+		return nr
+
+	default:
+		panic("unknown type")
+	}
+}
+
+func (rc rangeConstraint) Union(c Constraint) Constraint {
+	switch oc := c.(type) {
+	case any:
+		return Any()
+	case none:
+		return rc
+	case unionConstraint:
+		return Union(rc, oc)
+	case *Version:
+		if err := rc.Matches(oc); err == nil {
+			return rc
+		} else if len(rc.excl) > 0 { // TODO (re)checking like this is wasteful
+			// ensure we don't have an excl-specific mismatch; if we do, remove
+			// it and return that
+			for k, e := range rc.excl {
+				if e.Equal(oc) {
+					excl := make([]*Version, len(rc.excl)-1)
+
+					if k == len(rc.excl)-1 {
+						copy(excl, rc.excl[:k])
+					} else {
+						copy(excl, append(rc.excl[:k], rc.excl[k+1:]...))
+					}
+
+					return rangeConstraint{
+						min:        rc.min,
+						max:        rc.max,
+						includeMin: rc.includeMin,
+						includeMax: rc.includeMax,
+						excl:       excl,
+					}
+				}
+			}
+		}
+
+		if oc.LessThan(rc.min) {
+			return unionConstraint{oc, rc.dup()}
+		}
+		if areEq(oc, rc.min) {
+			ret := rc.dup()
+			ret.includeMin = true
+			return ret
+		}
+		if areEq(oc, rc.max) {
+			ret := rc.dup()
+			ret.includeMax = true
+			return ret
+		}
+		// Only possibility left is gt
+		return unionConstraint{rc.dup(), oc}
+	case rangeConstraint:
+		if (rc.min == nil && oc.max == nil) || (rc.max == nil && oc.min == nil) {
+			rcl, ocl := len(rc.excl), len(oc.excl)
+			// Quick check for open case
+			if rcl == 0 && ocl == 0 {
+				return Any()
+			}
+
+			// This is inefficient, but it's such an absurd corner case...
+			if len(dedupeExcls(rc.excl, oc.excl)) == rcl+ocl {
+				// If deduped excludes are the same length as the individual
+				// excludes, then they have no overlapping elements, so the
+				// union knocks out the excludes and we're back to Any.
+				return Any()
+			}
+
+			// There's at least some dupes, which are all we need to include
+			nc := rangeConstraint{}
+			for _, e1 := range rc.excl {
+				for _, e2 := range oc.excl {
+					if e1.Equal(e2) {
+						nc.excl = append(nc.excl, e1)
+					}
+				}
+			}
+
+			return nc
+		} else if areAdjacent(rc, oc) {
+			// Receiver adjoins the input from below
+			nc := rc.dup()
+
+			nc.max = oc.max
+			nc.includeMax = oc.includeMax
+			nc.excl = append(nc.excl, oc.excl...)
+
+			return nc
+		} else if areAdjacent(oc, rc) {
+			// Input adjoins the receiver from below
+			nc := oc.dup()
+
+			nc.max = rc.max
+			nc.includeMax = rc.includeMax
+			nc.excl = append(nc.excl, rc.excl...)
+
+			return nc
+
+		} else if rc.MatchesAny(oc) {
+			// Receiver and input overlap; form a new range accordingly.
+			nc := rangeConstraint{}
+
+			// For efficiency, we simultaneously determine if either of the
+			// ranges are supersets of the other, while also selecting the min
+			// and max of the new range
+			var info uint8
+
+			const (
+				lminlt uint8             = 1 << iota // left (rc) min less than right
+				rminlt                               // right (oc) min less than left
+				lmaxgt                               // left max greater than right
+				rmaxgt                               // right max greater than left
+				lsupr  = lminlt | lmaxgt             // left is superset of right
+				rsupl  = rminlt | rmaxgt             // right is superset of left
+			)
+
+			// Pick the min
+			if rc.min != nil {
+				if oc.min == nil || rc.min.GreaterThan(oc.min) || (rc.min.Equal(oc.min) && !rc.includeMin && oc.includeMin) {
+					info |= rminlt
+					nc.min = oc.min
+					nc.includeMin = oc.includeMin
+				} else {
+					info |= lminlt
+					nc.min = rc.min
+					nc.includeMin = rc.includeMin
+				}
+			} else if oc.min != nil {
+				info |= lminlt
+				nc.min = rc.min
+				nc.includeMin = rc.includeMin
+			}
+
+			// Pick the max
+			if rc.max != nil {
+				if oc.max == nil || rc.max.LessThan(oc.max) || (rc.max.Equal(oc.max) && !rc.includeMax && oc.includeMax) {
+					info |= rmaxgt
+					nc.max = oc.max
+					nc.includeMax = oc.includeMax
+				} else {
+					info |= lmaxgt
+					nc.max = rc.max
+					nc.includeMax = rc.includeMax
+				}
+			} else if oc.max != nil {
+				info |= lmaxgt
+				nc.max = rc.max
+				nc.includeMax = rc.includeMax
+			}
+
+			// Reincorporate any excluded versions
+			if info&lsupr != lsupr {
+				// rc is not superset of oc, so must walk oc.excl
+				for _, e := range oc.excl {
+					if rc.Matches(e) != nil {
+						nc.excl = append(nc.excl, e)
+					}
+				}
+			}
+
+			if info&rsupl != rsupl {
+				// oc is not superset of rc, so must walk rc.excl
+				for _, e := range rc.excl {
+					if oc.Matches(e) != nil {
+						nc.excl = append(nc.excl, e)
+					}
+				}
+			}
+
+			return nc
+		} else {
+			// Don't call Union() here b/c it would duplicate work
+			uc := constraintList{rc, oc}
+			sort.Sort(uc)
+			return unionConstraint(uc)
+		}
+	}
+
+	panic("unknown type")
+}
+
+// isSupersetOf computes whether the receiver rangeConstraint is a superset of
+// the passed rangeConstraint.
+//
+// This is NOT a strict superset comparison, so identical ranges will both
+// report being supersets of each other.
+//
+// Note also that this does *not* compare excluded versions - it only compares
+// range endpoints.
+func (rc rangeConstraint) isSupersetOf(rc2 rangeConstraint) bool {
+	if rc.min != nil {
+		if rc2.min == nil || rc.min.GreaterThan(rc2.min) || (rc.min.Equal(rc2.min) && !rc.includeMin && rc2.includeMin) {
+			return false
+		}
+	}
+
+	if rc.max != nil {
+		if rc2.max == nil || rc.max.LessThan(rc2.max) || (rc.max.Equal(rc2.max) && !rc.includeMax && rc2.includeMax) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (rc rangeConstraint) String() string {
+	// TODO express using caret or tilde, where applicable
+	var pieces []string
+	if rc.min != nil {
+		if rc.includeMin {
+			pieces = append(pieces, fmt.Sprintf(">=%s", rc.min))
+		} else {
+			pieces = append(pieces, fmt.Sprintf(">%s", rc.min))
+		}
+	}
+
+	if rc.max != nil {
+		if rc.includeMax {
+			pieces = append(pieces, fmt.Sprintf("<=%s", rc.max))
+		} else {
+			pieces = append(pieces, fmt.Sprintf("<%s", rc.max))
+		}
+	}
+
+	for _, e := range rc.excl {
+		pieces = append(pieces, fmt.Sprintf("!=%s", e))
+	}
+
+	return strings.Join(pieces, ", ")
+}
+
+// areAdjacent tests two constraints to determine if they are adjacent,
+// but non-overlapping.
+//
+// If either constraint is not a range, returns false. We still allow it at the
+// type level, however, to make the check convenient elsewhere.
+//
+// Assumes the first range is less than the second; it is incumbent on the
+// caller to arrange the inputs appropriately.
+func areAdjacent(c1, c2 Constraint) bool {
+	var rc1, rc2 rangeConstraint
+	var ok bool
+	if rc1, ok = c1.(rangeConstraint); !ok {
+		return false
+	}
+	if rc2, ok = c2.(rangeConstraint); !ok {
+		return false
+	}
+
+	if !areEq(rc1.max, rc2.min) {
+		return false
+	}
+
+	return (rc1.includeMax && !rc2.includeMin) ||
+		(!rc1.includeMax && rc2.includeMin)
+}
+
+func (rc rangeConstraint) MatchesAny(c Constraint) bool {
+	if _, ok := rc.Intersect(c).(none); ok {
+		return false
+	}
+	return true
+}
+
+func dedupeExcls(ex1, ex2 []*Version) []*Version {
+	// TODO stupid inefficient, but these are really only ever going to be
+	// small, so not worth optimizing right now
+	var ret []*Version
+oloop:
+	for _, e1 := range ex1 {
+		for _, e2 := range ex2 {
+			if e1.Equal(e2) {
+				continue oloop
+			}
+		}
+		ret = append(ret, e1)
+	}
+
+	return append(ret, ex2...)
+}
+
+func (rangeConstraint) _private() {}
+func (rangeConstraint) _real()    {}
+
+func areEq(v1, v2 *Version) bool {
+	if v1 == nil && v2 == nil {
+		return true
+	}
+
+	if v1 != nil && v2 != nil {
+		return v1.Equal(v2)
+	}
+	return false
+}
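A sketch of the range set operations defined above. It assumes NewConstraint is the public entry point and that comma-separated parts are intersected; expected outputs follow rangeConstraint.String:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	c1, _ := semver.NewConstraint(">=1.0.0, <2.0.0")
	c2, _ := semver.NewConstraint(">=1.2.0, <2.2.0")

	fmt.Println(c1.Intersect(c2)) // ">=1.2.0, <2.0.0": only the overlap survives
	fmt.Println(c1.Union(c2))     // ">=1.0.0, <2.2.0": overlapping ranges merge

	c3, _ := semver.NewConstraint(">=3.0.0, <4.0.0")
	fmt.Println(semver.IsNone(c1.Intersect(c3))) // true: disjoint ranges
	fmt.Println(c1.Union(c3))                    // ">=1.0.0, <2.0.0 || >=3.0.0, <4.0.0"
}
```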
diff --git a/vendor/github.com/Masterminds/semver/set_ops_test.go b/vendor/github.com/Masterminds/semver/set_ops_test.go
new file mode 100644
index 0000000..363e848
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/set_ops_test.go
@@ -0,0 +1,914 @@
+package semver
+
+import "testing"
+
+func TestIntersection(t *testing.T) {
+	var actual Constraint
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = Intersection(); !IsNone(actual) {
+		t.Errorf("Intersection of nothing should always produce None; got %q", actual)
+	}
+
+	if actual = Intersection(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Intersection of one item should always return that item; got %q")
+	}
+
+	if actual = Intersection(rc1, None()); !IsNone(actual) {
+		t.Errorf("Intersection of anything with None should always produce None; got %q", actual)
+	}
+
+	if actual = Intersection(rc1, Any()); !constraintEq(actual, rc1) {
+		t.Errorf("Intersection of anything with Any should return self; got %q", actual)
+	}
+
+	v1 := newV(1, 5, 0)
+	if actual = Intersection(rc1, v1); !constraintEq(actual, v1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, v1)
+	}
+
+	rc2 := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+	result := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = Intersection(rc1, rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	u1 := unionConstraint{
+		rangeConstraint{
+			min: newV(1, 2, 0),
+			max: newV(3, 0, 0),
+		},
+		newV(3, 1, 0),
+	}
+
+	if actual = Intersection(u1, rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = Intersection(rc1, newV(2, 0, 5), u1); !IsNone(actual) {
+		t.Errorf("First two are disjoint, should have gotten None but got %q", actual)
+	}
+}
+
+func TestRangeIntersection(t *testing.T) {
+	var actual Constraint
+	// Test magic cases
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) {
+		t.Errorf("Intersection of anything with Any should return self; got %q", actual)
+	}
+	if actual = rc1.Intersect(None()); !IsNone(actual) {
+		t.Errorf("Intersection of anything with None should always produce None; got %q", actual)
+	}
+
+	// Test single version cases
+
+	// single v, in range
+	v1 := newV(1, 5, 0)
+
+	if actual = rc1.Intersect(v1); !constraintEq(actual, v1) {
+		t.Errorf("Intersection of version with matching range should return the version; got %q", actual)
+	}
+
+	// now exclude just that version
+	rc1.excl = []*Version{v1}
+	if actual = rc1.Intersect(v1); !IsNone(actual) {
+		t.Errorf("Intersection of version with range having specific exclude for that version should produce None; got %q", actual)
+	}
+
+	// and, of course, none if the version is out of range
+	v2 := newV(0, 5, 0)
+	if actual = rc1.Intersect(v2); !IsNone(actual) {
+		t.Errorf("Intersection of version with non-matching range should produce None; got %q", actual)
+	}
+
+	// Test basic overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+	result := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And with includes
+	rc1.includeMin = true
+	rc1.includeMax = true
+	rc2.includeMin = true
+	rc2.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Overlaps with nils
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		max: newV(2, 2, 0),
+	}
+	result = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And with includes
+	rc1.includeMin = true
+	rc2.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test superset overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	result = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Make sure irrelevant includes don't leak in
+	rc2.includeMin = true
+	rc2.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// But relevant includes get used
+	rc1.includeMin = true
+	rc1.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test disjoint case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(1, 6, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+
+	// Test disjoint at gt/lt boundary (non-adjacent)
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+
+	// Now, just have them touch at a single version
+	rc1.includeMax = true
+	rc2.includeMin = true
+
+	vresult := newV(2, 0, 0)
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, vresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, vresult)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, vresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, vresult)
+	}
+
+	// Test excludes in intersection range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	// Test excludes not in intersection range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+		excl: []*Version{
+			newV(1, 1, 0),
+		},
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	// Test min, and greater min
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min:        newV(1, 5, 0),
+		includeMin: true,
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Test max, and lesser max
+	rc1 = rangeConstraint{
+		max: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		max: newV(1, 5, 0),
+	}
+	result = rangeConstraint{
+		max: newV(1, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Ensure pure excludes come through as they should
+	rc1 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+
+	rc2 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+			newV(1, 7, 0),
+		},
+	}
+
+	if actual = Any().Intersect(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// TODO test the pre-release special range stuff
+}
+
+func TestRangeUnion(t *testing.T) {
+	var actual Constraint
+	// Test magic cases
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	if actual = rc1.Union(Any()); !IsAny(actual) {
+		t.Errorf("Union of anything with Any should always produce Any; got %q", actual)
+	}
+	if actual = rc1.Union(None()); !constraintEq(actual, rc1) {
+		t.Errorf("Union of anything with None should return self; got %q", actual)
+	}
+
+	// Test single version cases
+
+	// single v, in range
+	v1 := newV(1, 5, 0)
+
+	if actual = rc1.Union(v1); !constraintEq(actual, rc1) {
+		t.Errorf("Union of version with matching range should return the range; got %q", actual)
+	}
+
+	// now exclude just that version
+	rc2 := rc1.dup()
+	rc2.excl = []*Version{v1}
+	if actual = rc2.Union(v1); !constraintEq(actual, rc1) {
+		t.Errorf("Union of version with range having specific exclude for that version should produce the range without that exclude; got %q", actual)
+	}
+
+	// and a union if the version is not within the range
+	v2 := newV(0, 5, 0)
+	uresult := unionConstraint{v2, rc1}
+	if actual = rc1.Union(v2); !constraintEq(actual, uresult) {
+		t.Errorf("Union of version with non-matching range should produce a unionConstraint with those two; got %q", actual)
+	}
+
+	// union with version at the min should ensure "oreq"
+	v2 = newV(1, 0, 0)
+	rc3 := rc1
+	rc3.includeMin = true
+
+	if actual = rc1.Union(v2); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual)
+	}
+	if actual = v2.Union(rc1); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual)
+	}
+
+	// same at max end
+	v2 = newV(2, 0, 0)
+	rc3.includeMin = false
+	rc3.includeMax = true
+
+	if actual = rc1.Union(v2); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual)
+	}
+	if actual = v2.Union(rc1); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual)
+	}
+
+	// Test basic overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+	result := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And with includes
+	rc1.includeMin = true
+	rc1.includeMax = true
+	rc2.includeMin = true
+	rc2.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Overlaps with nils
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+
+	// Just one nil in overlap
+	rc1.max = newV(2, 0, 0)
+	result = rangeConstraint{
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	rc1.max = nil
+	rc2.min = newV(1, 5, 0)
+	result = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test superset overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Test disjoint case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(1, 6, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	uresult = unionConstraint{rc1, rc2}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+
+	// Test disjoint at gt/lt boundary (non-adjacent)
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	uresult = unionConstraint{rc1, rc2}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+
+	// Now, just have them touch at a single version
+	rc1.includeMax = true
+	rc2.includeMin = true
+	result = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And top-adjacent at that version
+	rc2.includeMin = false
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	// And bottom-adjacent at that version
+	rc1.includeMax = false
+	rc2.includeMin = true
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test excludes in overlapping range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Test excludes in the non-overlapping part of the range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+		excl: []*Version{
+			newV(1, 1, 0),
+		},
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Ensure pure excludes come through as they should
+	rc1 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+
+	rc2 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+			newV(1, 7, 0),
+		},
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	rc1 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 5, 0),
+		},
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+
+	// TODO test the pre-release special range stuff
+}
+
+func TestUnionIntersection(t *testing.T) {
+	var actual Constraint
+	// magic first
+	u1 := unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+	}
+	if actual = u1.Intersect(Any()); !constraintEq(actual, u1) {
+		t.Errorf("Intersection of anything with Any should return self; got %s", actual)
+	}
+	if actual = u1.Intersect(None()); !IsNone(actual) {
+		t.Errorf("Intersection of anything with None should always produce None; got %s", actual)
+	}
+	if u1.MatchesAny(None()) {
+		t.Errorf("Can't match any when intersected with None")
+	}
+
+	// intersect of unions with single versions
+	v1 := newV(1, 1, 0)
+	if actual = u1.Intersect(v1); !constraintEq(actual, v1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, v1)
+	}
+	if actual = v1.Intersect(u1); !constraintEq(actual, v1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, v1)
+	}
+
+	// intersect of range with union of versions
+	u1 = unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+	}
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = u1.Intersect(rc1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+	if actual = rc1.Intersect(u1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+
+	u2 := unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+	}
+
+	if actual = u1.Intersect(u2); !constraintEq(actual, u2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u2)
+	}
+
+	// Overlapping sub/supersets
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(1, 6, 0),
+	}
+	rc2 := rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	rc3 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc4 := rangeConstraint{
+		min: newV(2, 5, 0),
+		max: newV(2, 6, 0),
+	}
+	u1 = unionConstraint{rc1, rc2}
+	u2 = unionConstraint{rc3, rc4}
+	ur := unionConstraint{rc1, rc4}
+
+	if actual = u1.Intersect(u2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = u2.Intersect(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+
+	// Ensure excludes carry as they should
+	rc1.excl = []*Version{newV(1, 5, 5)}
+	u1 = unionConstraint{rc1, rc2}
+	ur = unionConstraint{rc1, rc4}
+
+	if actual = u1.Intersect(u2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = u2.Intersect(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+}
+
+func TestUnionUnion(t *testing.T) {
+	var actual Constraint
+	// magic first
+	u1 := unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+	}
+	if actual = u1.Union(Any()); !IsAny(actual) {
+		t.Errorf("Union of anything with Any should always return Any; got %s", actual)
+	}
+	if actual = u1.Union(None()); !constraintEq(actual, u1) {
+		t.Errorf("Union of anything with None should always return self; got %s", actual)
+	}
+
+	// union of uc with single versions
+	// already present
+	v1 := newV(1, 2, 0)
+	if actual = u1.Union(v1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+	if actual = v1.Union(u1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+
+	// not present
+	v2 := newV(1, 4, 0)
+	ur := append(u1, v2)
+	if actual = u1.Union(v2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = v2.Union(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+
+	// union of uc with uc, all versions
+	u2 := unionConstraint{
+		newV(1, 3, 0),
+		newV(1, 4, 0),
+		newV(1, 5, 0),
+	}
+	ur = unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+		newV(1, 4, 0),
+		newV(1, 5, 0),
+	}
+
+	if actual = u1.Union(u2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = u2.Union(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+
+	// union that should compress versions into range
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = u1.Union(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc1.Union(u1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	rc1.max = newV(1, 4, 5)
+	u3 := append(u2, newV(1, 7, 0))
+	ur = unionConstraint{
+		rc1,
+		newV(1, 5, 0),
+		newV(1, 7, 0),
+	}
+
+	if actual = u3.Union(rc1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = rc1.Union(u3); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+}
+
+// Most version stuff got tested by range and/or union b/c most tests were
+// repeated bidirectionally (set operations are commutative; testing in pairs
+// helps us catch any situation where we fail to maintain that invariant)
+func TestVersionSetOps(t *testing.T) {
+	var actual Constraint
+
+	v1 := newV(1, 0, 0)
+
+	if actual = v1.Intersect(v1); !constraintEq(actual, v1) {
+		t.Errorf("Version intersected with itself should be itself, got %q", actual)
+	}
+	if !v1.MatchesAny(v1) {
+		t.Errorf("MatchesAny should work with a version against itself")
+	}
+
+	v2 := newV(2, 0, 0)
+	if actual = v1.Intersect(v2); !IsNone(actual) {
+		t.Errorf("Versions should only intersect with themselves, got %q", actual)
+	}
+	if v1.MatchesAny(v2) {
+		t.Errorf("MatchesAny should not work when combined with anything other than itself")
+	}
+
+	result := unionConstraint{v1, v2}
+
+	if actual = v1.Union(v1); !constraintEq(actual, v1) {
+		t.Errorf("Version union with itself should return self, got %q", actual)
+	}
+
+	if actual = v1.Union(v2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = v2.Union(v1); !constraintEq(actual, result) {
+		// Reversed operands, to make sure the result ordering is normalized
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+}
+
+func TestAreAdjacent(t *testing.T) {
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+
+	if areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges overlap, should not indicate as adjacent")
+	}
+
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+	}
+
+	if areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are non-overlapping and non-adjacent, but reported as adjacent")
+	}
+
+	rc2.includeMin = true
+
+	if !areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent")
+	}
+
+	rc1.includeMax = true
+
+	if areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are overlapping at a single version, but reported as adjacent")
+	}
+
+	rc2.includeMin = false
+	if !areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent")
+	}
+}
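The tests above run every pairing in both directions because the set operations are meant to be commutative. That invariant can also be spot-checked as a property outside the test suite, as in this sketch (NewConstraint assumed as the public parser):

```go
package main

import "github.com/Masterminds/semver"

func main() {
	c1, _ := semver.NewConstraint(">=1.0.0, <2.0.0")
	c2, _ := semver.NewConstraint(">=1.5.0")

	// Commutativity: the result must not depend on receiver vs. argument.
	if c1.Union(c2).String() != c2.Union(c1).String() {
		panic("union is not commutative")
	}
	if c1.Intersect(c2).String() != c2.Intersect(c1).String() {
		panic("intersection is not commutative")
	}
}
```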
diff --git a/vendor/github.com/Masterminds/semver/union.go b/vendor/github.com/Masterminds/semver/union.go
new file mode 100644
index 0000000..2659828
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/union.go
@@ -0,0 +1,141 @@
+package semver
+
+import "strings"
+
+type unionConstraint []realConstraint
+
+func (uc unionConstraint) Matches(v *Version) error {
+	var uce MultiMatchFailure
+	for _, c := range uc {
+		if err := c.Matches(v); err == nil {
+			return nil
+		} else {
+			uce = append(uce, err.(MatchFailure))
+		}
+	}
+
+	return uce
+}
+
+func (uc unionConstraint) Intersect(c2 Constraint) Constraint {
+	var other []realConstraint
+
+	switch tc2 := c2.(type) {
+	case none:
+		return None()
+	case any:
+		return uc
+	case *Version:
+		return c2
+	case rangeConstraint:
+		other = append(other, tc2)
+	case unionConstraint:
+		other = c2.(unionConstraint)
+	default:
+		panic("unknown type")
+	}
+
+	var newc []Constraint
+	// TODO there's a smarter way to do this than NxN, but...worth it?
+	for _, c := range uc {
+		for _, oc := range other {
+			i := c.Intersect(oc)
+			if !IsNone(i) {
+				newc = append(newc, i)
+			}
+		}
+	}
+
+	return Union(newc...)
+}
+
+func (uc unionConstraint) MatchesAny(c Constraint) bool {
+	for _, ic := range uc {
+		if ic.MatchesAny(c) {
+			return true
+		}
+	}
+	return false
+}
+
+func (uc unionConstraint) Union(c Constraint) Constraint {
+	return Union(uc, c)
+}
+
+func (uc unionConstraint) String() string {
+	var pieces []string
+	for _, c := range uc {
+		pieces = append(pieces, c.String())
+	}
+
+	return strings.Join(pieces, " || ")
+}
+func (unionConstraint) _private() {}
+
+type constraintList []realConstraint
+
+func (cl constraintList) Len() int {
+	return len(cl)
+}
+
+func (cl constraintList) Swap(i, j int) {
+	cl[i], cl[j] = cl[j], cl[i]
+}
+
+func (cl constraintList) Less(i, j int) bool {
+	ic, jc := cl[i], cl[j]
+
+	switch tic := ic.(type) {
+	case *Version:
+		switch tjc := jc.(type) {
+		case *Version:
+			return tic.LessThan(tjc)
+		case rangeConstraint:
+			if tjc.min == nil {
+				return false
+			}
+
+			// Because we don't assume stable sort, always put versions ahead of
+			// range mins if they're equal and includeMin is on
+			if tjc.includeMin && tic.Equal(tjc.min) {
+				return false
+			}
+			return tic.LessThan(tjc.min)
+		}
+	case rangeConstraint:
+		switch tjc := jc.(type) {
+		case *Version:
+			if tic.min == nil {
+				return true
+			}
+
+			// Because we don't assume stable sort, always put versions ahead of
+			// range mins if they're equal and includeMin is on
+			if tic.includeMin && tjc.Equal(tic.min) {
+				return false
+			}
+			return tic.min.LessThan(tjc)
+		case rangeConstraint:
+			if tic.min == nil {
+				return true
+			}
+			if tjc.min == nil {
+				return false
+			}
+			return tic.min.LessThan(tjc.min)
+		}
+	}
+
+	panic("unreachable")
+}
+
+func (cl *constraintList) Push(x interface{}) {
+	*cl = append(*cl, x.(realConstraint))
+}
+
+func (cl *constraintList) Pop() interface{} {
+	o := *cl
+	c := o[len(o)-1]
+	*cl = o[:len(o)-1]
+	return c
+}
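A unionConstraint matches when any one of its members matches; when all members fail, MultiMatchFailure aggregates one error per member. A sketch, assuming NewConstraint accepts the usual `||` union syntax:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	c, _ := semver.NewConstraint("<1.0.0 || >=2.0.0")

	v, _ := semver.NewVersion("2.1.0")
	fmt.Println(c.Matches(v)) // <nil>: the second branch admits it

	v, _ = semver.NewVersion("1.5.0")
	fmt.Println(c.Matches(v)) // a MultiMatchFailure, one line per failed branch
}
```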
diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go
index dbb93f8..e9261ca 100644
--- a/vendor/github.com/Masterminds/semver/version.go
+++ b/vendor/github.com/Masterminds/semver/version.go
@@ -7,6 +7,7 @@
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 )
 
 // The compiled version of the regex created at init() is cached here so it
@@ -19,6 +20,25 @@
 	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
 )
 
+// Error type; lets us defer string interpolation
+type badVersionSegment struct {
+	e error
+}
+
+func (b badVersionSegment) Error() string {
+	return fmt.Sprintf("Error parsing version segment: %s", b.e)
+}
+
+// CacheVersions controls whether or not parsed versions are cached
+var CacheVersions = true
+var versionCache = make(map[string]vcache)
+var versionCacheLock sync.RWMutex
+
+type vcache struct {
+	v   *Version
+	err error
+}
+
 // SemVerRegex id the regular expression used to parse a semantic version.
 const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
 	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
@@ -39,8 +59,22 @@
 // NewVersion parses a given version and returns an instance of Version or
 // an error if unable to parse the version.
 func NewVersion(v string) (*Version, error) {
+	if CacheVersions {
+		versionCacheLock.RLock()
+		if sv, exists := versionCache[v]; exists {
+			versionCacheLock.RUnlock()
+			return sv.v, sv.err
+		}
+		versionCacheLock.RUnlock()
+	}
+
 	m := versionRegex.FindStringSubmatch(v)
 	if m == nil {
+		if CacheVersions {
+			versionCacheLock.Lock()
+			versionCache[v] = vcache{err: ErrInvalidSemVer}
+			versionCacheLock.Unlock()
+		}
 		return nil, ErrInvalidSemVer
 	}
 
@@ -53,14 +87,28 @@
 	var temp int64
 	temp, err := strconv.ParseInt(m[1], 10, 32)
 	if err != nil {
-		return nil, fmt.Errorf("Error parsing version segment: %s", err)
+		bvs := badVersionSegment{e: err}
+		if CacheVersions {
+			versionCacheLock.Lock()
+			versionCache[v] = vcache{err: bvs}
+			versionCacheLock.Unlock()
+		}
+
+		return nil, bvs
 	}
 	sv.major = temp
 
 	if m[2] != "" {
 		temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("Error parsing version segment: %s", err)
+			bvs := badVersionSegment{e: err}
+			if CacheVersions {
+				versionCacheLock.Lock()
+				versionCache[v] = vcache{err: bvs}
+				versionCacheLock.Unlock()
+			}
+
+			return nil, bvs
 		}
 		sv.minor = temp
 	} else {
@@ -70,13 +118,26 @@
 	if m[3] != "" {
 		temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("Error parsing version segment: %s", err)
+			bvs := badVersionSegment{e: err}
+			if CacheVersions {
+				versionCacheLock.Lock()
+				versionCache[v] = vcache{err: bvs}
+				versionCacheLock.Unlock()
+			}
+
+			return nil, bvs
 		}
 		sv.patch = temp
 	} else {
 		sv.patch = 0
 	}
 
+	if CacheVersions {
+		versionCacheLock.Lock()
+		versionCache[v] = vcache{v: sv}
+		versionCacheLock.Unlock()
+	}
+
 	return sv, nil
 }
 
@@ -131,11 +192,21 @@
 
 // LessThan tests if one version is less than another one.
 func (v *Version) LessThan(o *Version) bool {
+	// If a nil version was passed, fail and bail out early.
+	if o == nil {
+		return false
+	}
+
 	return v.Compare(o) < 0
 }
 
 // GreaterThan tests if one version is greater than another one.
 func (v *Version) GreaterThan(o *Version) bool {
+	// If a nil version was passed, fail and bail out early.
+	if o == nil {
+		return false
+	}
+
 	return v.Compare(o) > 0
 }
 
@@ -143,6 +214,11 @@
 // Note, versions can be equal with different metadata since metadata
 // is not considered part of the comparable version.
 func (v *Version) Equal(o *Version) bool {
+	// If a nil version was passed, fail and bail out early.
+	if o == nil {
+		return false
+	}
+
 	return v.Compare(o) == 0
 }
 
@@ -181,6 +257,46 @@
 	return comparePrerelease(ps, po)
 }
 
+func (v *Version) Matches(v2 *Version) error {
+	if v.Equal(v2) {
+		return nil
+	}
+
+	return VersionMatchFailure{v: v, other: v2}
+}
+
+func (v *Version) MatchesAny(c Constraint) bool {
+	if v2, ok := c.(*Version); ok {
+		return v.Equal(v2)
+	} else {
+		// The other implementations all have specific handling for this; fall
+		// back on theirs.
+		return c.MatchesAny(v)
+	}
+}
+
+func (v *Version) Intersect(c Constraint) Constraint {
+	if v2, ok := c.(*Version); ok {
+		if v.Equal(v2) {
+			return v
+		}
+		return none{}
+	}
+
+	return c.Intersect(v)
+}
+
+func (v *Version) Union(c Constraint) Constraint {
+	if v2, ok := c.(*Version); ok && v.Equal(v2) {
+		return v
+	} else {
+		return Union(v, c)
+	}
+}
+
+func (Version) _private() {}
+func (Version) _real()    {}
+
 func compareSegment(v, o int64) int {
 	if v < o {
 		return -1
diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml
new file mode 100644
index 0000000..1a0bbea
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go:
+  - tip
diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE
new file mode 100644
index 0000000..a5df10e
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md
new file mode 100644
index 0000000..26f42a2
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/README.md
@@ -0,0 +1,38 @@
+go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix)
+=========
+
+Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+   the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := radix.New()
+r.Insert("foo", 1)
+r.Insert("bar", 2)
+r.Insert("foobar", 2)
+
+// Find the longest prefix match
+m, _, _ := r.LongestPrefix("foozip")
+if m != "foo" {
+    panic("should be foo")
+}
+```
+
diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go
new file mode 100644
index 0000000..d2914c1
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/radix.go
@@ -0,0 +1,496 @@
+package radix
+
+import (
+	"sort"
+	"strings"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning true if iteration should
+// be terminated.
+type WalkFn func(s string, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+	key string
+	val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+	label byte
+	node  *node
+}
+
+type node struct {
+	// leaf is used to store possible leaf
+	leaf *leafNode
+
+	// prefix is the common prefix we ignore
+	prefix string
+
+	// Edges should be stored in-order for iteration.
+	// We avoid a fully materialized slice to save memory,
+	// since in most cases we expect to be sparse
+	edges edges
+}
+
+func (n *node) isLeaf() bool {
+	return n.leaf != nil
+}
+
+func (n *node) addEdge(e edge) {
+	n.edges = append(n.edges, e)
+	n.edges.Sort()
+}
+
+func (n *node) replaceEdge(e edge) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	if idx < num && n.edges[idx].label == e.label {
+		n.edges[idx].node = e.node
+		return
+	}
+	panic("replacing missing edge")
+}
+
+func (n *node) getEdge(label byte) *node {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		return n.edges[idx].node
+	}
+	return nil
+}
+
+func (n *node) delEdge(label byte) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		copy(n.edges[idx:], n.edges[idx+1:])
+		n.edges[len(n.edges)-1] = edge{}
+		n.edges = n.edges[:len(n.edges)-1]
+	}
+}
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
+
+// Tree implements a radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over
+// a standard hash map is prefix-based lookups and
+// ordered iteration.
+type Tree struct {
+	root *node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	return NewFromMap(nil)
+}
+
+// NewFromMap returns a new tree containing the keys
+// from an existing map
+func NewFromMap(m map[string]interface{}) *Tree {
+	t := &Tree{root: &node{}}
+	for k, v := range m {
+		t.Insert(k, v)
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// Insert is used to add a new entry or update
+// an existing entry. Returns the previous value and
+// whether an existing entry was updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
+	var parent *node
+	n := t.root
+	search := s
+	for {
+		// Handle key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				old := n.leaf.val
+				n.leaf.val = v
+				return old, true
+			}
+
+			n.leaf = &leafNode{
+				key: s,
+				val: v,
+			}
+			t.size++
+			return nil, false
+		}
+
+		// Look for the edge
+		parent = n
+		n = n.getEdge(search[0])
+
+		// No edge, create one
+		if n == nil {
+			e := edge{
+				label: search[0],
+				node: &node{
+					leaf: &leafNode{
+						key: s,
+						val: v,
+					},
+					prefix: search,
+				},
+			}
+			parent.addEdge(e)
+			t.size++
+			return nil, false
+		}
+
+		// Determine longest prefix of the search key on match
+		commonPrefix := longestPrefix(search, n.prefix)
+		if commonPrefix == len(n.prefix) {
+			search = search[commonPrefix:]
+			continue
+		}
+
+		// Split the node
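+		// Example (illustrative): if the tree holds "foozip" and we insert
+		// "foobar", the common prefix is "foo" (length 3). A new intermediate
+		// node with prefix "foo" replaces the edge, the existing node keeps
+		// the remainder "zip", and "bar" is added below as a sibling edge.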
+		t.size++
+		child := &node{
+			prefix: search[:commonPrefix],
+		}
+		parent.replaceEdge(edge{
+			label: search[0],
+			node:  child,
+		})
+
+		// Restore the existing node
+		child.addEdge(edge{
+			label: n.prefix[commonPrefix],
+			node:  n,
+		})
+		n.prefix = n.prefix[commonPrefix:]
+
+		// Create a new leaf node
+		leaf := &leafNode{
+			key: s,
+			val: v,
+		}
+
+		// If the new key is a subset, add it to this node
+		search = search[commonPrefix:]
+		if len(search) == 0 {
+			child.leaf = leaf
+			return nil, false
+		}
+
+		// Create a new edge for the node
+		child.addEdge(edge{
+			label: search[0],
+			node: &node{
+				leaf:   leaf,
+				prefix: search,
+			},
+		})
+		return nil, false
+	}
+}
+
+// Delete is used to delete a key, returning the previous
+// value and if it was deleted
+func (t *Tree) Delete(s string) (interface{}, bool) {
+	var parent *node
+	var label byte
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if !n.isLeaf() {
+				break
+			}
+			goto DELETE
+		}
+
+		// Look for an edge
+		parent = n
+		label = search[0]
+		n = n.getEdge(label)
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+
+DELETE:
+	// Delete the leaf
+	leaf := n.leaf
+	n.leaf = nil
+	t.size--
+
+	// Check if we should delete this node from the parent
+	if parent != nil && len(n.edges) == 0 {
+		parent.delEdge(label)
+	}
+
+	// Check if we should merge this node
+	if n != t.root && len(n.edges) == 1 {
+		n.mergeChild()
+	}
+
+	// Check if we should merge the parent's other child
+	if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+		parent.mergeChild()
+	}
+
+	return leaf.val, true
+}
+
+func (n *node) mergeChild() {
+	e := n.edges[0]
+	child := e.node
+	n.prefix = n.prefix + child.prefix
+	n.leaf = child.leaf
+	n.edges = child.edges
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(s string) (interface{}, bool) {
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
+	var last *leafNode
+	n := t.root
+	search := s
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return "", nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (t *Tree) Minimum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return "", nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (t *Tree) Maximum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		break
+	}
+	return "", nil, false
+}
+
+// Walk is used to walk the tree
+func (t *Tree) Walk(fn WalkFn) {
+	recursiveWalk(t.root, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
+	n := t.root
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else if strings.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
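+//
+// For example, with keys "f", "fo", and "foo" stored, WalkPath("foo")
+// visits "f", "fo", and "foo" in that order; WalkPrefix("f") would visit
+// the same three keys, but because they all sit *under* the "f" prefix.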
+func (t *Tree) WalkPath(path string, fn WalkFn) {
+	n := t.root
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
+
+// ToMap is used to walk the tree and convert it into a map
+func (t *Tree) ToMap() map[string]interface{} {
+	out := make(map[string]interface{}, t.size)
+	t.Walk(func(k string, v interface{}) bool {
+		out[k] = v
+		return false
+	})
+	return out
+}
diff --git a/vendor/github.com/armon/go-radix/radix_test.go b/vendor/github.com/armon/go-radix/radix_test.go
new file mode 100644
index 0000000..300f0d4
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/radix_test.go
@@ -0,0 +1,319 @@
+package radix
+
+import (
+	crand "crypto/rand"
+	"fmt"
+	"reflect"
+	"sort"
+	"testing"
+)
+
+func TestRadix(t *testing.T) {
+	var min, max string
+	inp := make(map[string]interface{})
+	for i := 0; i < 1000; i++ {
+		gen := generateUUID()
+		inp[gen] = i
+		if gen < min || i == 0 {
+			min = gen
+		}
+		if gen > max || i == 0 {
+			max = gen
+		}
+	}
+
+	r := NewFromMap(inp)
+	if r.Len() != len(inp) {
+		t.Fatalf("bad length: %v %v", r.Len(), len(inp))
+	}
+
+	r.Walk(func(k string, v interface{}) bool {
+		println(k)
+		return false
+	})
+
+	for k, v := range inp {
+		out, ok := r.Get(k)
+		if !ok {
+			t.Fatalf("missing key: %v", k)
+		}
+		if out != v {
+			t.Fatalf("value mis-match: %v %v", out, v)
+		}
+	}
+
+	// Check min and max
+	outMin, _, _ := r.Minimum()
+	if outMin != min {
+		t.Fatalf("bad minimum: %v %v", outMin, min)
+	}
+	outMax, _, _ := r.Maximum()
+	if outMax != max {
+		t.Fatalf("bad maximum: %v %v", outMax, max)
+	}
+
+	for k, v := range inp {
+		out, ok := r.Delete(k)
+		if !ok {
+			t.Fatalf("missing key: %v", k)
+		}
+		if out != v {
+			t.Fatalf("value mis-match: %v %v", out, v)
+		}
+	}
+	if r.Len() != 0 {
+		t.Fatalf("bad length: %v", r.Len())
+	}
+}
+
+func TestRoot(t *testing.T) {
+	r := New()
+	_, ok := r.Delete("")
+	if ok {
+		t.Fatalf("bad")
+	}
+	_, ok = r.Insert("", true)
+	if ok {
+		t.Fatalf("bad")
+	}
+	val, ok := r.Get("")
+	if !ok || val != true {
+		t.Fatalf("bad: %v", val)
+	}
+	val, ok = r.Delete("")
+	if !ok || val != true {
+		t.Fatalf("bad: %v", val)
+	}
+}
+
+func TestDelete(t *testing.T) {
+	r := New()
+
+	s := []string{"", "A", "AB"}
+
+	for _, ss := range s {
+		r.Insert(ss, true)
+	}
+
+	for _, ss := range s {
+		_, ok := r.Delete(ss)
+		if !ok {
+			t.Fatalf("bad %q", ss)
+		}
+	}
+}
+
+func TestLongestPrefix(t *testing.T) {
+	r := New()
+
+	keys := []string{
+		"",
+		"foo",
+		"foobar",
+		"foobarbaz",
+		"foobarbazzip",
+		"foozip",
+	}
+	for _, k := range keys {
+		r.Insert(k, nil)
+	}
+	if r.Len() != len(keys) {
+		t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+	}
+
+	type exp struct {
+		inp string
+		out string
+	}
+	cases := []exp{
+		{"a", ""},
+		{"abc", ""},
+		{"fo", ""},
+		{"foo", "foo"},
+		{"foob", "foo"},
+		{"foobar", "foobar"},
+		{"foobarba", "foobar"},
+		{"foobarbaz", "foobarbaz"},
+		{"foobarbazzi", "foobarbaz"},
+		{"foobarbazzip", "foobarbazzip"},
+		{"foozi", "foo"},
+		{"foozip", "foozip"},
+		{"foozipzap", "foozip"},
+	}
+	for _, test := range cases {
+		m, _, ok := r.LongestPrefix(test.inp)
+		if !ok {
+			t.Fatalf("no match: %v", test)
+		}
+		if m != test.out {
+			t.Fatalf("mis-match: %v %v", m, test)
+		}
+	}
+}
+
+func TestWalkPrefix(t *testing.T) {
+	r := New()
+
+	keys := []string{
+		"foobar",
+		"foo/bar/baz",
+		"foo/baz/bar",
+		"foo/zip/zap",
+		"zipzap",
+	}
+	for _, k := range keys {
+		r.Insert(k, nil)
+	}
+	if r.Len() != len(keys) {
+		t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+	}
+
+	type exp struct {
+		inp string
+		out []string
+	}
+	cases := []exp{
+		{
+			"f",
+			[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+		},
+		{
+			"foo",
+			[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+		},
+		{
+			"foob",
+			[]string{"foobar"},
+		},
+		{
+			"foo/",
+			[]string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+		},
+		{
+			"foo/b",
+			[]string{"foo/bar/baz", "foo/baz/bar"},
+		},
+		{
+			"foo/ba",
+			[]string{"foo/bar/baz", "foo/baz/bar"},
+		},
+		{
+			"foo/bar",
+			[]string{"foo/bar/baz"},
+		},
+		{
+			"foo/bar/baz",
+			[]string{"foo/bar/baz"},
+		},
+		{
+			"foo/bar/bazoo",
+			[]string{},
+		},
+		{
+			"z",
+			[]string{"zipzap"},
+		},
+	}
+
+	for _, test := range cases {
+		out := []string{}
+		fn := func(s string, v interface{}) bool {
+			out = append(out, s)
+			return false
+		}
+		r.WalkPrefix(test.inp, fn)
+		sort.Strings(out)
+		sort.Strings(test.out)
+		if !reflect.DeepEqual(out, test.out) {
+			t.Fatalf("mis-match: %v %v", out, test.out)
+		}
+	}
+}
+
+func TestWalkPath(t *testing.T) {
+	r := New()
+
+	keys := []string{
+		"foo",
+		"foo/bar",
+		"foo/bar/baz",
+		"foo/baz/bar",
+		"foo/zip/zap",
+		"zipzap",
+	}
+	for _, k := range keys {
+		r.Insert(k, nil)
+	}
+	if r.Len() != len(keys) {
+		t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+	}
+
+	type exp struct {
+		inp string
+		out []string
+	}
+	cases := []exp{
+		{
+			"f",
+			[]string{},
+		},
+		{
+			"foo",
+			[]string{"foo"},
+		},
+		{
+			"foo/",
+			[]string{"foo"},
+		},
+		{
+			"foo/ba",
+			[]string{"foo"},
+		},
+		{
+			"foo/bar",
+			[]string{"foo", "foo/bar"},
+		},
+		{
+			"foo/bar/baz",
+			[]string{"foo", "foo/bar", "foo/bar/baz"},
+		},
+		{
+			"foo/bar/bazoo",
+			[]string{"foo", "foo/bar", "foo/bar/baz"},
+		},
+		{
+			"z",
+			[]string{},
+		},
+	}
+
+	for _, test := range cases {
+		out := []string{}
+		fn := func(s string, v interface{}) bool {
+			out = append(out, s)
+			return false
+		}
+		r.WalkPath(test.inp, fn)
+		sort.Strings(out)
+		sort.Strings(test.out)
+		if !reflect.DeepEqual(out, test.out) {
+			t.Fatalf("mis-match: %v %v", out, test.out)
+		}
+	}
+}
+
+// generateUUID is used to generate a random UUID
+func generateUUID() string {
+	buf := make([]byte, 16)
+	if _, err := crand.Read(buf); err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16])
+}
diff --git a/vendor/github.com/sdboyer/gps/.gitignore b/vendor/github.com/sdboyer/gps/.gitignore
new file mode 100644
index 0000000..22d0d82
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/.gitignore
@@ -0,0 +1 @@
+vendor
diff --git a/vendor/github.com/sdboyer/gps/CODE_OF_CONDUCT.md b/vendor/github.com/sdboyer/gps/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..660ee84
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at sam (at) samboyer.org. All complaints
+will be reviewed and investigated and will result in a response that is deemed
+necessary and appropriate to the circumstances. The project team is obligated to
+maintain confidentiality with regard to the reporter of an incident. Further
+details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/sdboyer/gps/CONTRIBUTING.md b/vendor/github.com/sdboyer/gps/CONTRIBUTING.md
new file mode 100644
index 0000000..3ff03b3
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# Contributing to `gps`
+
+:+1::tada: First, we're thrilled you're thinking about contributing! :tada::+1:
+
+As a library trying to cover all the bases in Go package management, it's
+crucial that we incorporate a broad range of experiences and use cases. There is
+a strong, motivating design behind `gps`, but we are always open to discussion
+on ways we can improve the library, particularly if it allows `gps` to cover
+more of the Go package management possibility space.
+
+`gps` has no CLA, but we do have a [Code of Conduct](https://github.com/sdboyer/gps/blob/master/CODE_OF_CONDUCT.md). By
+participating, you are expected to uphold this code.
+
+## How can I contribute?
+
+It may be best to start by getting a handle on what `gps` actually is. Our
+wiki has a [general introduction](https://github.com/sdboyer/gps/wiki/Introduction-to-gps), a
+[guide for tool implementors](https://github.com/sdboyer/gps/wiki/gps-for-Implementors), and
+a [guide for contributors](https://github.com/sdboyer/gps/wiki/gps-for-contributors).
+There's also a [discursive essay](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527)
+that lays out the big-picture goals and considerations driving the `gps` design.
+
+There are a number of ways to contribute, all highly valuable and deeply
+appreciated:
+
+* **Helping "translate" existing issues:** as `gps` exits its larval stage, it still
+  has a number of issues that may be incomprehensible to everyone except
+  @sdboyer. Simply asking clarifying questions on these issues is helpful!
+* **Identifying missed use cases:** the loose `gps` rule of thumb is, "if you can do
+  it in Go, we support it in `gps`." Posting issues about cases we've missed
+  helps us reach that goal.
+* **Writing tests:** in the same vein, `gps` has a [large suite](https://github.com/sdboyer/gps) of solving tests, but
+  they still only scratch the surface. Writing tests is not only helpful, but is
+  also a great way to get a feel for how `gps` works.
+* **Suggesting enhancements:** `gps` has plenty of missing chunks. Help fill them in!
+* **Reporting bugs**: `gps` being a library means this isn't always the easiest.
+  However, you could always compile the [example](https://github.com/sdboyer/gps/blob/master/example.go), run that against some of
+  your projects, and report problems you encounter.
+* **Building experimental tools with `gps`:** probably the best and fastest ways to
+  kick the tires!
+
+`gps` is still beta-ish software. There are plenty of bugs to squash! APIs are
+stabilizing, but are still subject to change.
+
+## Issues and Pull Requests
+
+Pull requests are the preferred way to submit changes to `gps`. Unless the
+changes are quite small, pull requests should generally reference an
+already-opened issue. Make sure to explain clearly in the body of the PR what
+the reasoning behind the change is.
+
+The changes themselves should generally conform to the following guidelines:
+
+* Git commit messages should be [well-written](http://chris.beams.io/posts/git-commit/#seven-rules).
+* Code should be `gofmt`-ed.
+* New or changed logic should be accompanied by tests.
+* Maintainable, table-based tests are strongly preferred, even if it means
+  writing a new testing harness to execute them (see the sketch below).
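+
+As a hedged illustration of the table-based style - not a real `gps` test;
+the `sum` helper and its cases are invented, and the standard `testing`
+import is assumed:
+
+```go
+// sum is a stand-in function under test.
+func sum(xs []int) (total int) {
+	for _, x := range xs {
+		total += x
+	}
+	return total
+}
+
+func TestSum(t *testing.T) {
+	// Each case is one row in the table; adding coverage
+	// means adding a row, not a new test function.
+	cases := []struct {
+		name string
+		in   []int
+		want int
+	}{
+		{"empty", nil, 0},
+		{"single", []int{2}, 2},
+		{"several", []int{1, 2, 3}, 6},
+	}
+	for _, c := range cases {
+		if got := sum(c.in); got != c.want {
+			t.Errorf("%s: got %d, want %d", c.name, got, c.want)
+		}
+	}
+}
+```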
diff --git a/vendor/github.com/sdboyer/gps/LICENSE b/vendor/github.com/sdboyer/gps/LICENSE
new file mode 100644
index 0000000..d4a1dcc
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Sam Boyer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sdboyer/gps/README.md b/vendor/github.com/sdboyer/gps/README.md
new file mode 100644
index 0000000..227bf6b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/README.md
@@ -0,0 +1,95 @@
+# gps
+![map-marker-icon copy](https://cloud.githubusercontent.com/assets/21599/16779217/4f5cdc6c-483f-11e6-9de3-661f13d9b215.png)
+--
+
+[![CircleCI](https://circleci.com/gh/sdboyer/gps.svg?style=svg)](https://circleci.com/gh/sdboyer/gps) [![Go Report Card](https://goreportcard.com/badge/github.com/sdboyer/gps)](https://goreportcard.com/report/github.com/sdboyer/gps) [![GoDoc](https://godoc.org/github.com/sdboyer/gps?status.svg)](https://godoc.org/github.com/sdboyer/gps)
+
+`gps` is the Go Packaging Solver. It is an engine for tackling dependency
+management problems in Go. You can replicate the fetching bits of `go get`,
+modulo arguments, [in about 30 lines of
+code](https://github.com/sdboyer/gps/blob/master/example.go) with `gps`.
+
+`gps` is _not_ Yet Another Go Package Management Tool. Rather, it's a library
+that package management (and adjacent) tools can use to solve the
+[hard](https://en.wikipedia.org/wiki/Boolean_satisfiability_problem) parts of
+the problem in a consistent,
+[holistic](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527)
+way. `gps` is [on track](https://github.com/Masterminds/glide/pull/384) to become the engine behind [glide](https://glide.sh).
+
+The wiki has a [general introduction to the `gps`
+approach](https://github.com/sdboyer/gps/wiki/Introduction-to-gps), as well
+as guides for folks [implementing
+tools](https://github.com/sdboyer/gps/wiki/gps-for-Implementors) or [looking
+to contribute](https://github.com/sdboyer/gps/wiki/gps-for-contributors).
+
+**`gps` is progressing rapidly, but still beta, with a liberal sprinkling of panics.**
+
+## Wait...a package management _library_?!
+
+Yup. Because it's what the Go ecosystem needs right now.
+
+There are [scads of
+tools](https://github.com/golang/go/wiki/PackageManagementTools) out there, each
+tackling some slice of the Go package management domain. Some handle more than
+others, some impose more restrictions than others, and most are mutually
+incompatible (or mutually indifferent, which amounts to the same). This
+fragments the Go FLOSS ecosystem, harming the community as a whole.
+
+As in all epic software arguments, some of the points of disagreement between
+tools/their authors are a bit silly. Many, though, are based on legitimate
+differences of opinion about what workflows, controls, and interfaces are
+best to give Go developers.
+
+Now, we're certainly no less opinionated than anyone else. But part of the
+challenge has been that, with a problem as
+[complex](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527)
+as package management, subtle design decisions made in pursuit of a particular
+workflow or interface can have far-reaching effects on architecture, leading to
+deep incompatibilities between tools and approaches.
+
+We believe that many of [these
+differences](https://docs.google.com/document/d/1xrV9D5u8AKu1ip-A1W9JqhUmmeOhoI6d6zjVwvdn5mc/edit?usp=sharing)
+are incidental - and, given the right general solution, reconcilable. `gps` is
+our attempt at such a solution.
+
+By separating out the underlying problem into a standalone library, we are
+hoping to provide a common foundation for different tools. Such a foundation
+could improve interoperability, reduce harm to the ecosystem, and make the
+communal process of figuring out what's right for Go more about collaboration,
+and less about fiefdoms.
+
+### Assumptions
+
+Ideally, `gps` could provide this shared foundation with no additional
+assumptions beyond pure Go source files. Sadly, package management is too
+complex to be assumption-less. So, `gps` tries to keep its assumptions to the
+minimum, supporting as many situations as possible while still maintaining a
+predictable, well-formed system.
+
+* Go 1.6, or 1.5 with `GO15VENDOREXPERIMENT=1` set. `vendor/`
+  directories are a requirement.
+* You don't manually change what's under `vendor/`. That’s tooling’s
+  job.
+* A **project** concept, where projects comprise the set of Go packages in a
+  rooted directory tree.  By happy (not) accident, `vendor/` directories also
+  just happen to cover a rooted tree.
+* A [**manifest**](https://godoc.org/github.com/sdboyer/gps#Manifest) and
+  [**lock**](https://godoc.org/github.com/sdboyer/gps#Lock) approach to
+  tracking version and constraint information. The solver takes manifest (and,
+  optionally, lock)-type data as inputs, and produces lock-type data as its
+  output. Tools decide how to actually store this data, but these should
+  generally be at the root of the project tree.
+
+Manifests? Locks? Eeew. Yes, we also think it'd be swell if we didn't need
+metadata files. We love the idea of Go packages as standalone, self-describing
+code. Unfortunately, the wheels come off that idea as soon as versioning and
+cross-project/repository dependencies happen. But universe alignment is hard;
+trying to intermix version information directly with the code would only make
+matters worse.
+
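+To make the manifest-and-lock flow concrete, here is a minimal, hedged
+sketch of how a tool might drive the solver; `root`, `manifest`, and `sm`
+(a `SourceManager`) are tool-supplied stand-ins whose construction is
+elided:
+
+```go
+params := gps.SolveParameters{
+	RootDir:    root,     // the project's root directory on disk
+	ImportRoot: gps.ProjectRoot("github.com/foo/bar"),
+	Manifest:   manifest, // the tool's manifest data
+}
+
+// Prepare validates the inputs and returns a Solver.
+s, err := gps.Prepare(params, sm)
+if err != nil {
+	// handle the error
+}
+
+// Solve runs the solver; on success, the result is lock-type data
+// that the tool can serialize however it likes.
+solution, err := s.Solve()
+```
+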
+## Contributing
+
+Yay, contributing! Please see
+[CONTRIBUTING.md](https://github.com/sdboyer/gps/blob/master/CONTRIBUTING.md).
+Note that `gps` also abides by a [Code of
+Conduct](https://github.com/sdboyer/gps/blob/master/CODE_OF_CONDUCT.md), and is MIT-licensed.
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/a.go
new file mode 100644
index 0000000..e4e2ced
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	S = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/a.go
new file mode 100644
index 0000000..59d2f72
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/a.go
@@ -0,0 +1,14 @@
+package disallow
+
+import (
+	"sort"
+	"disallow/testdata"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+	_ = testdata.H
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/testdata/another.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/testdata/another.go
new file mode 100644
index 0000000..6defdae
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/testdata/another.go
@@ -0,0 +1,7 @@
+package testdata
+
+import "hash"
+
+var (
+	H = hash.Hash
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/a.go
new file mode 100644
index 0000000..04cac6a
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/a.go
@@ -0,0 +1,12 @@
+package base
+
+import (
+	"go/parser"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = parser.ParseFile
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/nm.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/nm.go
new file mode 100644
index 0000000..44a0abb
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/nm.go
@@ -0,0 +1,12 @@
+package nm
+
+import (
+	"os"
+
+	"github.com/Masterminds/semver"
+)
+
+var (
+	V = os.FileInfo
+	_ = semver.Constraint
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/empty/.gitkeep b/vendor/github.com/sdboyer/gps/_testdata/src/empty/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/empty/.gitkeep
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmain/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmain/igmain.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/igmain.go
new file mode 100644
index 0000000..52129ef
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/igmain.go
@@ -0,0 +1,7 @@
+// +build ignore
+
+package main
+
+import "unicode"
+
+var _ = unicode.In
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/igmain.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/igmain.go
new file mode 100644
index 0000000..efee3f9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/igmain.go
@@ -0,0 +1,9 @@
+// Another comment, which the parser should ignore and still see build tags
+
+// +build ignore
+
+package main
+
+import "unicode"
+
+var _ = unicode.In
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/igmain.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/igmain.go
new file mode 100644
index 0000000..52129ef
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/igmain.go
@@ -0,0 +1,7 @@
+// +build ignore
+
+package main
+
+import "unicode"
+
+var _ = unicode.In
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/missing/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/missing/a.go
new file mode 100644
index 0000000..8522bdd
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/missing/a.go
@@ -0,0 +1,14 @@
+package simple
+
+import (
+	"sort"
+
+	"missing/missing"
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+	_ = missing.Foo
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/nest/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/nest/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/nest/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/ren/simple/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/ren/simple/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/ren/simple/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simple/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simple/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simple/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a_test.go
new file mode 100644
index 0000000..72a3014
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a_test.go
@@ -0,0 +1,11 @@
+package simple_test
+
+import (
+	"sort"
+	"strconv"
+)
+
+var (
+	_ = sort.Strings
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplet/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplet/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a_test.go
new file mode 100644
index 0000000..72a3014
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a_test.go
@@ -0,0 +1,11 @@
+package simple_test
+
+import (
+	"sort"
+	"strconv"
+)
+
+var (
+	_ = sort.Strings
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/t/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/t/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/t/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/locals.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/locals.go
new file mode 100644
index 0000000..5c7e6c7
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/locals.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+	"varied/namemismatch"
+	"varied/otherpath"
+	"varied/simple"
+)
+
+var (
+	_ = simple.S
+	_ = nm.V
+	_ = otherpath.O
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/a.go
new file mode 100644
index 0000000..65fd7ca
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	M = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/main.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/main.go
new file mode 100644
index 0000000..92c3dc1
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/main.go
@@ -0,0 +1,9 @@
+package main
+
+import (
+	"net/http"
+)
+
+var (
+	_ = http.Client
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/namemismatch/nm.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/namemismatch/nm.go
new file mode 100644
index 0000000..44a0abb
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/namemismatch/nm.go
@@ -0,0 +1,12 @@
+package nm
+
+import (
+	"os"
+
+	"github.com/Masterminds/semver"
+)
+
+var (
+	V = os.FileInfo
+	_ = semver.Constraint
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/otherpath/otherpath_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/otherpath/otherpath_test.go
new file mode 100644
index 0000000..73891e6
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/otherpath/otherpath_test.go
@@ -0,0 +1,5 @@
+package otherpath
+
+import "varied/m1p"
+
+var O = m1p.M
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another.go
new file mode 100644
index 0000000..85368da
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another.go
@@ -0,0 +1,7 @@
+package another
+
+import "hash"
+
+var (
+	H = hash.Hash
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another_test.go
new file mode 100644
index 0000000..72a89ad
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another_test.go
@@ -0,0 +1,7 @@
+package another
+
+import "encoding/binary"
+
+var (
+	_ = binary.PutVarint
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/locals.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/locals.go
new file mode 100644
index 0000000..d8d0316
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/locals.go
@@ -0,0 +1,5 @@
+package another
+
+import "varied/m1p"
+
+var _ = m1p.M
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/locals.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/locals.go
new file mode 100644
index 0000000..6ebb90f
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/locals.go
@@ -0,0 +1,7 @@
+package simple
+
+import "varied/simple/another"
+
+var (
+	_ = another.H
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/simple.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/simple.go
new file mode 100644
index 0000000..c8fbb05
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/simple.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"go/parser"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = parser.ParseFile
+	S = gps.Prepare
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/xt/a_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/xt/a_test.go
new file mode 100644
index 0000000..72a3014
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/xt/a_test.go
@@ -0,0 +1,11 @@
+package simple_test
+
+import (
+	"sort"
+	"strconv"
+)
+
+var (
+	_ = sort.Strings
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/analysis.go b/vendor/github.com/sdboyer/gps/analysis.go
new file mode 100644
index 0000000..0cb93ba
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/analysis.go
@@ -0,0 +1,950 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"go/build"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"text/scanner"
+)
+
+var osList []string
+var archList []string
+var stdlib = make(map[string]bool)
+
+const stdlibPkgs string = "archive archive/tar archive/zip bufio builtin bytes compress compress/bzip2 compress/flate compress/gzip compress/lzw compress/zlib container container/heap container/list container/ring context crypto crypto/aes crypto/cipher crypto/des crypto/dsa crypto/ecdsa crypto/elliptic crypto/hmac crypto/md5 crypto/rand crypto/rc4 crypto/rsa crypto/sha1 crypto/sha256 crypto/sha512 crypto/subtle crypto/tls crypto/x509 crypto/x509/pkix database database/sql database/sql/driver debug debug/dwarf debug/elf debug/gosym debug/macho debug/pe debug/plan9obj encoding encoding/ascii85 encoding/asn1 encoding/base32 encoding/base64 encoding/binary encoding/csv encoding/gob encoding/hex encoding/json encoding/pem encoding/xml errors expvar flag fmt go go/ast go/build go/constant go/doc go/format go/importer go/parser go/printer go/scanner go/token go/types hash hash/adler32 hash/crc32 hash/crc64 hash/fnv html html/template image image/color image/color/palette image/draw image/gif image/jpeg image/png index index/suffixarray io io/ioutil log log/syslog math math/big math/cmplx math/rand mime mime/multipart mime/quotedprintable net net/http net/http/cgi net/http/cookiejar net/http/fcgi net/http/httptest net/http/httputil net/http/pprof net/mail net/rpc net/rpc/jsonrpc net/smtp net/textproto net/url os os/exec os/signal os/user path path/filepath reflect regexp regexp/syntax runtime runtime/cgo runtime/debug runtime/msan runtime/pprof runtime/race runtime/trace sort strconv strings sync sync/atomic syscall testing testing/iotest testing/quick text text/scanner text/tabwriter text/template text/template/parse time unicode unicode/utf16 unicode/utf8 unsafe"
+
+// Before appengine moved to google.golang.org/appengine, it had a magic
+// stdlib-like import path. We have to ignore all of these.
+const appenginePkgs string = "appengine/aetest appengine/blobstore appengine/capability appengine/channel appengine/cloudsql appengine/cmd appengine/cmd/aebundler appengine/cmd/aedeploy appengine/cmd/aefix appengine/datastore appengine/delay appengine/demos appengine/demos/guestbook appengine/demos/guestbook/templates appengine/demos/helloworld appengine/file appengine/image appengine/internal appengine/internal/aetesting appengine/internal/app_identity appengine/internal/base appengine/internal/blobstore appengine/internal/capability appengine/internal/channel appengine/internal/datastore appengine/internal/image appengine/internal/log appengine/internal/mail appengine/internal/memcache appengine/internal/modules appengine/internal/remote_api appengine/internal/search appengine/internal/socket appengine/internal/system appengine/internal/taskqueue appengine/internal/urlfetch appengine/internal/user appengine/internal/xmpp appengine/log appengine/mail appengine/memcache appengine/module appengine/remote_api appengine/runtime appengine/search appengine/socket appengine/taskqueue appengine/urlfetch appengine/user appengine/xmpp"
+
+func init() {
+	// The supported systems are listed in
+	// https://github.com/golang/go/blob/master/src/go/build/syslist.go
+	// The lists are not exported so we need to duplicate them here.
+	osListString := "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows"
+	osList = strings.Split(osListString, " ")
+
+	archListString := "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64"
+	archList = strings.Split(archListString, " ")
+
+	for _, pkg := range strings.Split(stdlibPkgs, " ") {
+		stdlib[pkg] = true
+	}
+	for _, pkg := range strings.Split(appenginePkgs, " ") {
+		stdlib[pkg] = true
+	}
+
+	// Also ignore C
+	// TODO(sdboyer) actually figure out how to deal with cgo
+	stdlib["C"] = true
+}
+
+// listPackages lists info for all packages at or below the provided fileRoot.
+//
+// Directories without any valid Go files are excluded. Directories with
+// multiple packages are excluded.
+//
+// The importRoot parameter is prepended to the relative path when determining
+// the import path for each package. The obvious case is for something typical,
+// like:
+//
+//  fileRoot = "/home/user/go/src/github.com/foo/bar"
+//  importRoot = "github.com/foo/bar"
+//
+// where the fileRoot and importRoot align. However, if you provide:
+//
+//  fileRoot = "/home/user/workspace/path/to/repo"
+//  importRoot = "github.com/foo/bar"
+//
+// then the root package at path/to/repo will be ascribed import path
+// "github.com/foo/bar", and its subpackage "baz" will be
+// "github.com/foo/bar/baz".
+//
+// A PackageTree is returned, which contains the ImportRoot and a map of
+// import path to PackageOrErr - each path under the root that exists will
+// have either a Package, or an error describing why the directory is not a
+// valid package.
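+//
+// For example (hypothetical paths), a root containing one valid package and
+// one broken subdirectory might produce entries like:
+//
+//  ptree.Packages["github.com/foo/bar"]     = PackageOrErr{P: Package{...}}
+//  ptree.Packages["github.com/foo/bar/bad"] = PackageOrErr{Err: ...}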
+func listPackages(fileRoot, importRoot string) (PackageTree, error) {
+	// Set up a build.ctx for parsing
+	ctx := build.Default
+	ctx.GOROOT = ""
+	ctx.GOPATH = ""
+	ctx.UseAllFiles = true
+
+	ptree := PackageTree{
+		ImportRoot: importRoot,
+		Packages:   make(map[string]PackageOrErr),
+	}
+
+	// mkfilter returns two funcs that can be injected into a build.Context,
+	// letting us filter the results into an "in" and "out" set.
+	mkfilter := func(files map[string]struct{}) (in, out func(dir string) (fi []os.FileInfo, err error)) {
+		in = func(dir string) (fi []os.FileInfo, err error) {
+			all, err := ioutil.ReadDir(dir)
+			if err != nil {
+				return nil, err
+			}
+
+			for _, f := range all {
+				if _, exists := files[f.Name()]; exists {
+					fi = append(fi, f)
+				}
+			}
+			return fi, nil
+		}
+
+		out = func(dir string) (fi []os.FileInfo, err error) {
+			all, err := ioutil.ReadDir(dir)
+			if err != nil {
+				return nil, err
+			}
+
+			for _, f := range all {
+				if _, exists := files[f.Name()]; !exists {
+					fi = append(fi, f)
+				}
+			}
+			return fi, nil
+		}
+
+		return
+	}
+
+	// helper func to create a Package from a *build.Package
+	happy := func(importPath string, p *build.Package) Package {
+		// Happy path - simple parsing worked
+		pkg := Package{
+			ImportPath:  importPath,
+			CommentPath: p.ImportComment,
+			Name:        p.Name,
+			Imports:     p.Imports,
+			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
+		}
+
+		return pkg
+	}
+
+	err := filepath.Walk(fileRoot, func(path string, fi os.FileInfo, err error) error {
+		if err != nil && err != filepath.SkipDir {
+			return err
+		}
+		if !fi.IsDir() {
+			return nil
+		}
+
+		// Skip dirs that are known to hold non-local/dependency code.
+		//
+		// We don't skip _* or testdata dirs because, while it may be poor
+		// form, importing them is not a compilation error.
+		switch fi.Name() {
+		case "vendor", "Godeps":
+			return filepath.SkipDir
+		}
+		// We do skip dot-dirs, though, because it's a near-universal convention
+		// that normal commands don't visit them, and because things get
+		// really weird if we don't.
+		//
+		// TODO(sdboyer) does this entail that we should chuck dot-led import
+		// paths later on?
+		if strings.HasPrefix(fi.Name(), ".") {
+			return filepath.SkipDir
+		}
+
+		// Compute the import path. Run the result through ToSlash(), so that
+		// Windows paths are normalized to Unix separators, as import paths
+		// are expected to be.
+		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(path, fileRoot)))
+
+		// Find all the imports, across all os/arch combos
+		p, err := ctx.ImportDir(path, analysisImportMode())
+		var pkg Package
+		if err == nil {
+			pkg = happy(ip, p)
+		} else {
+			switch terr := err.(type) {
+			case *build.NoGoError:
+				ptree.Packages[ip] = PackageOrErr{
+					Err: err,
+				}
+				return nil
+			case *build.MultiplePackageError:
+				// Set this up preemptively, so we can easily just return out if
+				// something goes wrong. Otherwise, it'll get transparently
+				// overwritten later.
+				ptree.Packages[ip] = PackageOrErr{
+					Err: err,
+				}
+
+				// For now, we're punting entirely on dealing with os/arch
+				// combinations. That will be a more significant refactor.
+				//
+				// However, there is one case we want to allow here - one or
+				// more files with "+build ignore" with package `main`. (Ignore
+				// is just a convention, but for now it's good enough to just
+				// check that.) This is a fairly common way to give examples,
+				// and to make a more sophisticated build system than a Makefile
+				// allows, so we want to support that case. So, transparently
+				// lump the deps together.
+				mains := make(map[string]struct{})
+				for k, pkgname := range terr.Packages {
+					if pkgname == "main" {
+						tags, err2 := readFileBuildTags(filepath.Join(path, terr.Files[k]))
+						if err2 != nil {
+							return nil
+						}
+
+						var hasignore bool
+						for _, t := range tags {
+							if t == "ignore" {
+								hasignore = true
+								break
+							}
+						}
+						if !hasignore {
+							// No ignore tag found - bail out
+							return nil
+						}
+						mains[terr.Files[k]] = struct{}{}
+					}
+				}
+				// Make filtering funcs that will let us look only at the main
+				// files, and exclude the main files; inf and outf, respectively
+				inf, outf := mkfilter(mains)
+
+				// outf first; if there's another err there, we bail out with a
+				// return
+				ctx.ReadDir = outf
+				po, err2 := ctx.ImportDir(path, analysisImportMode())
+				if err2 != nil {
+					return nil
+				}
+				ctx.ReadDir = inf
+				pi, err2 := ctx.ImportDir(path, analysisImportMode())
+				if err2 != nil {
+					return nil
+				}
+				ctx.ReadDir = nil
+
+				// Use the other files as baseline, they're the main stuff
+				pkg = happy(ip, po)
+				mpkg := happy(ip, pi)
+				pkg.Imports = dedupeStrings(pkg.Imports, mpkg.Imports)
+				pkg.TestImports = dedupeStrings(pkg.TestImports, mpkg.TestImports)
+			default:
+				return err
+			}
+		}
+
+		// This area has some...fuzzy rules, but check all the imports for
+		// local/relative/dot-ness, and record an error for the package if we
+		// see any.
+		var lim []string
+		for _, imp := range append(pkg.Imports, pkg.TestImports...) {
+			switch {
+			// Do allow the single-dot, at least for now
+			case imp == "..":
+				lim = append(lim, imp)
+				// ignore stdlib done this way, b/c that's what the go tooling does
+			case strings.HasPrefix(imp, "./"):
+				if stdlib[imp[2:]] {
+					lim = append(lim, imp)
+				}
+			case strings.HasPrefix(imp, "../"):
+				if stdlib[imp[3:]] {
+					lim = append(lim, imp)
+				}
+			}
+		}
+
+		if len(lim) > 0 {
+			ptree.Packages[ip] = PackageOrErr{
+				Err: &LocalImportsError{
+					Dir:          ip,
+					LocalImports: lim,
+				},
+			}
+		} else {
+			ptree.Packages[ip] = PackageOrErr{
+				P: pkg,
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return PackageTree{}, err
+	}
+
+	return ptree, nil
+}
+
+// LocalImportsError indicates that a package contains at least one relative
+// import that will prevent it from compiling.
+//
+// TODO(sdboyer) add a Files property once we're doing our own per-file parsing
+type LocalImportsError struct {
+	Dir          string
+	LocalImports []string
+}
+
+func (e *LocalImportsError) Error() string {
+	return fmt.Sprintf("import path %s had problematic local imports", e.Dir)
+}
+
+type wm struct {
+	err error
+	ex  map[string]bool
+	in  map[string]bool
+}
+
+// wmToReach takes an externalReach()-style workmap and transitively walks all
+// internal imports until they reach an external path or terminate, then
+// translates the results into a slice of external imports for each internal
+// pkg.
+//
+// The basedir string, with a trailing slash ensured, will be stripped from the
+// keys of the returned map.
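+//
+// As an illustrative sketch (mirroring the "simple base transitive" test
+// fixture): if "foo" internally imports "foo/bar", and "foo/bar" externally
+// imports "baz", the returned map is
+//
+//  map[string][]string{
+//  	"foo":     {"baz"},
+//  	"foo/bar": {"baz"},
+//  }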
+func wmToReach(workmap map[string]wm, basedir string) map[string][]string {
+	// Uses depth-first exploration to compute reachability into external
+	// packages, dropping any internal packages on "poisoned paths" - a path
+	// containing a package with an error, or with a dep on an internal package
+	// that's missing.
+
+	const (
+		white uint8 = iota
+		grey
+		black
+	)
+
+	colors := make(map[string]uint8)
+	allreachsets := make(map[string]map[string]struct{})
+
+	// poison is a helper func to eliminate specific reachsets from allreachsets
+	poison := func(path []string) {
+		for _, ppkg := range path {
+			delete(allreachsets, ppkg)
+		}
+	}
+
+	var dfe func(string, []string) bool
+
+	// dfe is the depth-first-explorer that computes safe, error-free external
+	// reach map.
+	//
+	// pkg is the import path of the pkg currently being visited; path is the
+	// stack of parent packages we've visited to get to pkg. The return value
+	// indicates whether the level completed successfully (true) or if it was
+	// poisoned (false).
+	//
+	// TODO(sdboyer) some deft improvements could probably be made by passing the list of
+	// parent reachsets, rather than a list of parent package string names.
+	// might be able to eliminate the use of allreachsets map-of-maps entirely.
+	dfe = func(pkg string, path []string) bool {
+		// white is the zero value of uint8, which is what we want if the pkg
+		// isn't in the colors map, so this works fine
+		switch colors[pkg] {
+		case white:
+			// first visit to this pkg; mark it as in-process (grey)
+			colors[pkg] = grey
+
+			// make sure it's present and w/out errs
+			w, exists := workmap[pkg]
+			if !exists || w.err != nil {
+				// Does not exist or has an err; poison self and all parents
+				poison(path)
+
+				// we know we're done here, so mark it black
+				colors[pkg] = black
+				return false
+			}
+			// pkg exists with no errs. mark it as in-process (grey), and start
+			// a reachmap for it
+			//
+			// TODO(sdboyer) use sync.Pool here? can be lots of explicit map alloc/dealloc
+			rs := make(map[string]struct{})
+
+			// Push self onto the path slice. Passing this as a value has the
+			// effect of auto-popping the slice, while also giving us safe
+			// memory reuse.
+			path = append(path, pkg)
+
+			// Dump this package's external pkgs into its own reachset. Separate
+			// loop from the parent dump to avoid nested map loop lookups.
+			for ex := range w.ex {
+				rs[ex] = struct{}{}
+			}
+			allreachsets[pkg] = rs
+
+			// Push this pkg's external imports into all parent reachsets. Not
+			// all parents will necessarily have a reachset; none, some, or all
+			// could have been poisoned by a different path than what we're on
+			// right now. (Or we could be at depth 0)
+			for _, ppkg := range path {
+				if prs, exists := allreachsets[ppkg]; exists {
+					for ex := range w.ex {
+						prs[ex] = struct{}{}
+					}
+				}
+			}
+
+			// Now, recurse until done, or a false bubbles up, indicating the
+			// path is poisoned.
+			var clean bool
+			for in := range w.in {
+				// It's possible, albeit weird, for a package to import itself.
+				// If we try to visit self, though, then it erroneously poisons
+				// the path, as it would be interpreted as grey. In reality,
+				// this becomes a no-op, so just skip it.
+				if in == pkg {
+					continue
+				}
+
+				clean = dfe(in, path)
+				if !clean {
+					// Path is poisoned. Our reachmap was already deleted by the
+					// path we're returning from; mark ourselves black, then
+					// bubble up the poison. This is OK to do early, before
+					// exploring all internal imports, because the outer loop
+					// visits all internal packages anyway.
+					//
+					// In fact, stopping early is preferable - white subpackages
+					// won't have to iterate pointlessly through a parent path
+					// with no reachset.
+					colors[pkg] = black
+					return false
+				}
+			}
+
+			// Fully done with this pkg; no transitive problems.
+			colors[pkg] = black
+			return true
+
+		case grey:
+			// grey means an import cycle; guaranteed badness right here.
+			//
+			// FIXME handle import cycles by dropping everything involved. i
+			// think we need to compute SCC, then drop *all* of them?
+			colors[pkg] = black
+			poison(append(path, pkg)) // poison self and parents
+
+		case black:
+			// black means we're done with the package. If it has an entry in
+			// allreachsets, it completed successfully. If not, it was poisoned,
+			// and we need to bubble the poison back up.
+			rs, exists := allreachsets[pkg]
+			if !exists {
+				// just poison parents; self was necessarily already poisoned
+				poison(path)
+				return false
+			}
+
+			// It's good; pull the external imports from its reachset into all
+			// non-poisoned parent reachsets
+			for _, ppkg := range path {
+				if prs, exists := allreachsets[ppkg]; exists {
+					for ex := range rs {
+						prs[ex] = struct{}{}
+					}
+				}
+			}
+			return true
+
+		default:
+			panic(fmt.Sprintf("invalid color marker %v for %s", colors[pkg], pkg))
+		}
+
+		// Only the grey (import cycle) case falls through to here; a cycle
+		// poisons the path, so report failure.
+		return false
+	}
+
+	// Run the depth-first exploration.
+	//
+	// Don't bother computing graph sources, this straightforward loop works
+	// comparably well, and fits nicely with an escape hatch in the dfe.
+	var path []string
+	for pkg := range workmap {
+		dfe(pkg, path)
+	}
+
+	if len(allreachsets) == 0 {
+		return nil
+	}
+
+	// Flatten allreachsets into the final reachlist
+	rt := ensureTrailingSlash(basedir)
+	rm := make(map[string][]string)
+	for pkg, rs := range allreachsets {
+		rlen := len(rs)
+		if rlen == 0 {
+			rm[strings.TrimPrefix(pkg, rt)] = nil
+			continue
+		}
+
+		edeps := make([]string, rlen)
+		k := 0
+		for opkg := range rs {
+			edeps[k] = opkg
+			k++
+		}
+
+		sort.Strings(edeps)
+		rm[strings.TrimPrefix(pkg, rt)] = edeps
+	}
+
+	return rm
+}
+
+func readBuildTags(p string) ([]string, error) {
+	_, err := os.Stat(p)
+	if err != nil {
+		return []string{}, err
+	}
+
+	d, err := os.Open(p)
+	if err != nil {
+		return []string{}, err
+	}
+	defer d.Close()
+
+	objects, err := d.Readdir(-1)
+	if err != nil {
+		return []string{}, err
+	}
+
+	var tags []string
+	for _, obj := range objects {
+
+		// only process Go files
+		if strings.HasSuffix(obj.Name(), ".go") {
+			fp := filepath.Join(p, obj.Name())
+
+			co, err := readGoContents(fp)
+			if err != nil {
+				return []string{}, err
+			}
+
+			// Only look at places where we had a code comment.
+			if len(co) > 0 {
+				t := findTags(co)
+				for _, tg := range t {
+					found := false
+					for _, tt := range tags {
+						if tt == tg {
+							found = true
+						}
+					}
+					if !found {
+						tags = append(tags, tg)
+					}
+				}
+			}
+		}
+	}
+
+	return tags, nil
+}
+
+func readFileBuildTags(fp string) ([]string, error) {
+	co, err := readGoContents(fp)
+	if err != nil {
+		return []string{}, err
+	}
+
+	var tags []string
+	// Only look at places where we had a code comment.
+	if len(co) > 0 {
+		t := findTags(co)
+		for _, tg := range t {
+			found := false
+			for _, tt := range tags {
+				if tt == tg {
+					found = true
+				}
+			}
+			if !found {
+				tags = append(tags, tg)
+			}
+		}
+	}
+
+	return tags, nil
+}
+
+// Read contents of a Go file up to the package declaration. This can be used
+// to find the build tags.
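+//
+// For example (illustrative): for a file whose first lines are a build-tag
+// comment followed by "package foo", the returned bytes contain everything up
+// to, but not including, the package keyword.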
+func readGoContents(fp string) ([]byte, error) {
+	f, err := os.Open(fp)
+	if err != nil {
+		return []byte{}, err
+	}
+	defer f.Close()
+
+	var s scanner.Scanner
+	s.Init(f)
+	var tok rune
+	var pos scanner.Position
+	for tok != scanner.EOF {
+		tok = s.Scan()
+
+		// Getting the token text will skip comments by default.
+		tt := s.TokenText()
+		// build tags will not be after the package declaration.
+		if tt == "package" {
+			pos = s.Position
+			break
+		}
+	}
+
+	var buf bytes.Buffer
+	f.Seek(0, 0)
+	_, err = io.CopyN(&buf, f, int64(pos.Offset))
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// findTags extracts the build tags from the byte contents of a Go file.
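+//
+// For example (illustrative), content containing the comment lines
+// "// +build linux darwin" and "// +build amd64" yields
+// []string{"linux", "darwin", "amd64"}.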
+func findTags(co []byte) []string {
+	p := co
+	var tgs []string
+	for len(p) > 0 {
+		line := p
+		if i := bytes.IndexByte(line, '\n'); i >= 0 {
+			line, p = line[:i], p[i+1:]
+		} else {
+			p = p[len(p):]
+		}
+		line = bytes.TrimSpace(line)
+		// Only look at comment lines that are well formed in the Go style
+		if bytes.HasPrefix(line, []byte("//")) {
+			line = bytes.TrimSpace(line[len([]byte("//")):])
+			if len(line) > 0 && line[0] == '+' {
+				f := strings.Fields(string(line))
+
+				// We've found a +build tag line.
+				if f[0] == "+build" {
+					tgs = append(tgs, f[1:]...)
+				}
+			}
+		}
+	}
+
+	return tgs
+}
+
+// Get an OS value that's not the one passed in.
+func getOsValue(n string) string {
+	for _, o := range osList {
+		if o != n {
+			return o
+		}
+	}
+
+	return n
+}
+
+func isSupportedOs(n string) bool {
+	for _, o := range osList {
+		if o == n {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Get an Arch value that's not the one passed in.
+func getArchValue(n string) string {
+	for _, o := range archList {
+		if o != n {
+			return o
+		}
+	}
+
+	return n
+}
+
+func isSupportedArch(n string) bool {
+	for _, o := range archList {
+		if o == n {
+			return true
+		}
+	}
+
+	return false
+}
+
+func ensureTrailingSlash(s string) string {
+	return strings.TrimSuffix(s, string(os.PathSeparator)) + string(os.PathSeparator)
+}
+
+// helper func to merge, dedupe, and sort strings. Note that if only one input
+// is non-empty, it is returned as-is, without sorting or deduplication.
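+//
+// For example (illustrative): dedupeStrings([]string{"b", "a"}, []string{"a", "c"})
+// returns []string{"a", "b", "c"}.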
+func dedupeStrings(s1, s2 []string) (r []string) {
+	dedupe := make(map[string]bool)
+
+	if len(s1) > 0 && len(s2) > 0 {
+		for _, i := range s1 {
+			dedupe[i] = true
+		}
+		for _, i := range s2 {
+			dedupe[i] = true
+		}
+
+		for i := range dedupe {
+			r = append(r, i)
+		}
+		// And then re-sort them
+		sort.Strings(r)
+	} else if len(s1) > 0 {
+		r = s1
+	} else if len(s2) > 0 {
+		r = s2
+	}
+
+	return
+}
+
+// A PackageTree represents the results of recursively parsing a tree of
+// packages, starting at the ImportRoot. The results of parsing the files in the
+// directory identified by each import path - a Package or an error - are stored
+// in the Packages map, keyed by that import path.
+type PackageTree struct {
+	ImportRoot string
+	Packages   map[string]PackageOrErr
+}
+
+// PackageOrErr stores the results of attempting to parse a single directory for
+// Go source code.
+type PackageOrErr struct {
+	P   Package
+	Err error
+}
+
+// ExternalReach looks through a PackageTree and computes the list of external
+// packages (not logical children of PackageTree.ImportRoot) that are
+// transitively imported by the internal packages in the tree.
+//
+// main indicates whether (true) or not (false) to include main packages in the
+// analysis. main packages are generally excluded when analyzing anything other
+// than the root project, as they inherently can't be imported.
+//
+// tests indicates whether (true) or not (false) to include imports from test
+// files in packages when computing the reach map.
+//
+// ignore is a map of import paths that, if encountered, should be excluded from
+// analysis. This exclusion applies to both internal and external packages. If
+// an external import path is ignored, it is simply omitted from the results.
+//
+// If an internal path is ignored, then it is excluded from all transitive
+// dependency chains and does not appear as a key in the final map. That is, if
+// you ignore A/foo, then the external package list for all internal packages
+// that import A/foo will not include external packages that are only reachable
+// through A/foo.
+//
+// Visually, this means that, given a PackageTree with root A and packages at A,
+// A/foo, and A/bar, and the following import chain:
+//
+//  A -> A/foo -> A/bar -> B/baz
+//
+// If you ignore A/foo, then the returned map would be:
+//
+//  map[string][]string{
+// 	"A": []string{},
+// 	"A/bar": []string{"B/baz"},
+//  }
+//
+// It is safe to pass a nil map if there are no packages to ignore.
+func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) map[string][]string {
+	if ignore == nil {
+		ignore = make(map[string]bool)
+	}
+
+	// world's simplest adjacency list
+	workmap := make(map[string]wm)
+
+	var imps []string
+	for ip, perr := range t.Packages {
+		if perr.Err != nil {
+			workmap[ip] = wm{
+				err: perr.Err,
+			}
+			continue
+		}
+		p := perr.P
+
+		// Skip main packages, unless param says otherwise
+		if p.Name == "main" && !main {
+			continue
+		}
+		// Skip ignored packages
+		if ignore[ip] {
+			continue
+		}
+
+		imps = p.Imports
+		if tests {
+			imps = dedupeStrings(imps, p.TestImports)
+		}
+
+		w := wm{
+			ex: make(map[string]bool),
+			in: make(map[string]bool),
+		}
+
+		for _, imp := range imps {
+			// Skip ignored imports
+			if ignore[imp] {
+				continue
+			}
+
+			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
+				w.ex[imp] = true
+			} else {
+				if w2, seen := workmap[imp]; seen {
+					for i := range w2.ex {
+						w.ex[i] = true
+					}
+					for i := range w2.in {
+						w.in[i] = true
+					}
+				} else {
+					w.in[imp] = true
+				}
+			}
+		}
+
+		workmap[ip] = w
+	}
+
+	//return wmToReach(workmap, t.ImportRoot)
+	return wmToReach(workmap, "") // TODO(sdboyer) this passes tests, but doesn't seem right
+}
+
+// ListExternalImports computes a sorted, deduplicated list of all the external
+// packages that are reachable through imports from all valid packages in the
+// PackageTree.
+//
+// main and tests determine whether main packages and test imports should be
+// included in the calculation. "External" is defined as anything not prefixed,
+// after path cleaning, by the PackageTree.ImportRoot. This includes stdlib.
+//
+// If an internal path is ignored, all of the external packages that it uniquely
+// imports are omitted. Note, however, that no internal transitivity checks are
+// made here - every non-ignored package in the tree is considered independently
+// (with one set of exceptions, noted below). That means, given a PackageTree
+// with root A and packages at A, A/foo, and A/bar, and the following import
+// chain:
+//
+//  A -> A/foo -> A/bar -> B/baz
+//
+// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
+// returned, because this method visits ALL packages in the tree, not only
+// those reachable from the root (or any other) packages. If your use case
+// requires interrogating external imports with respect to only specific
+// package entry points, you need ExternalReach() instead.
+//
+// It is safe to pass a nil map if there are no packages to ignore.
+//
+// If an internal package has an error (that is, PackageOrErr is Err), it is
+// excluded from consideration. Internal packages that transitively import the
+// error package are also excluded. So, if:
+//
+//    -> B/foo
+//   /
+//  A
+//   \
+//    -> A/bar -> B/baz
+//
+// And A/bar has some error in it, then both A and A/bar will be eliminated from
+// consideration; neither B/foo nor B/baz will be in the results. If A/bar, with
+// its errors, is ignored, however, then A will remain, and B/foo will be in the
+// results.
+//
+// Finally, note that if a directory is named "testdata", or has a leading dot
+// or underscore, it will not be directly analyzed as a source. This is in
+// keeping with Go tooling conventions that such directories should be ignored.
+// So, if:
+//
+//  A -> B/foo
+//  A/.bar -> B/baz
+//  A/_qux -> B/baz
+//  A/testdata -> B/baz
+//
+// Then B/foo will be returned, but B/baz will not, because all three of the
+// packages that import it are in directories with disallowed names.
+//
+// HOWEVER, in keeping with the Go compiler, if one of those packages in a
+// disallowed directory is imported by a package in an allowed directory, then
+// it *will* be used. That is, while tools like go list will ignore a directory
+// named .foo, you can still import from .foo. Thus, it must be included. So,
+// if:
+//
+//    -> B/foo
+//   /
+//  A
+//   \
+//    -> A/.bar -> B/baz
+//
+// A is legal, and it imports A/.bar, so the results will include B/baz.
+func (t PackageTree) ListExternalImports(main, tests bool, ignore map[string]bool) []string {
+	// First, we need a reachmap
+	rm := t.ExternalReach(main, tests, ignore)
+
+	exm := make(map[string]struct{})
+	for pkg, reach := range rm {
+		// Eliminate import paths with any elements having leading dots, leading
+		// underscores, or testdata. If these are internally reachable (which is
+		// a no-no, but possible), any external imports will have already been
+		// pulled up through ExternalReach. The key here is that we don't want
+		// to treat such packages as themselves being sources.
+		//
+		// TODO(sdboyer) strings.Split will always heap alloc, which isn't great to do
+		// in a loop like this. We could also just parse it ourselves...
+		var skip bool
+		for _, elem := range strings.Split(pkg, "/") {
+			if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+				skip = true
+				break
+			}
+		}
+
+		if !skip {
+			for _, ex := range reach {
+				exm[ex] = struct{}{}
+			}
+		}
+	}
+
+	if len(exm) == 0 {
+		return nil
+	}
+
+	ex := make([]string, len(exm))
+	k := 0
+	for p := range exm {
+		ex[k] = p
+		k++
+	}
+
+	sort.Strings(ex)
+	return ex
+}
+
+// checkPrefixSlash checks whether s is exactly equal to prefix, or begins
+// with prefix followed by a path separator.
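+//
+// For example (illustrative, with a "/" separator): checkPrefixSlash("foo/bar",
+// "foo") and checkPrefixSlash("foo", "foo") are true, but
+// checkPrefixSlash("foobar", "foo") is false.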
+func checkPrefixSlash(s, prefix string) bool {
+	if !strings.HasPrefix(s, prefix) {
+		return false
+	}
+	return s == prefix || strings.HasPrefix(s, ensureTrailingSlash(prefix))
+}
diff --git a/vendor/github.com/sdboyer/gps/analysis_test.go b/vendor/github.com/sdboyer/gps/analysis_test.go
new file mode 100644
index 0000000..210d036
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/analysis_test.go
@@ -0,0 +1,1262 @@
+package gps
+
+import (
+	"fmt"
+	"go/build"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+// PackageTree.ExternalReach() uses an easily separable algorithm, wmToReach(),
+// to turn a discovered set of packages and their imports into a proper external
+// reach map.
+//
+// That algorithm is purely symbolic (no filesystem interaction), and thus is
+// easy to test. This is that test.
+func TestWorkmapToReach(t *testing.T) {
+	empty := func() map[string]bool {
+		return make(map[string]bool)
+	}
+
+	table := map[string]struct {
+		workmap map[string]wm
+		basedir string
+		out     map[string][]string
+	}{
+		"single": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo": nil,
+			},
+		},
+		"no external": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: empty(),
+				},
+				"foo/bar": {
+					ex: empty(),
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo":     nil,
+				"foo/bar": nil,
+			},
+		},
+		"no external with subpkg": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: map[string]bool{
+						"foo/bar": true,
+					},
+				},
+				"foo/bar": {
+					ex: empty(),
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo":     nil,
+				"foo/bar": nil,
+			},
+		},
+		"simple base transitive": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: map[string]bool{
+						"foo/bar": true,
+					},
+				},
+				"foo/bar": {
+					ex: map[string]bool{
+						"baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo": {
+					"baz",
+				},
+				"foo/bar": {
+					"baz",
+				},
+			},
+		},
+		"missing package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo": true, // missing
+						"A/bar": true,
+					},
+				},
+				"A/bar": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/bar": {
+					"B/baz",
+				},
+			},
+		},
+		"transitive missing package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo":  true, // transitively missing
+						"A/quux": true,
+					},
+				},
+				"A/foo": {
+					ex: map[string]bool{
+						"C/flugle": true,
+					},
+					in: map[string]bool{
+						"A/bar": true, // missing
+					},
+				},
+				"A/quux": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/quux": {
+					"B/baz",
+				},
+			},
+		},
+		"err'd package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo": true, // err'd
+						"A/bar": true,
+					},
+				},
+				"A/foo": {
+					err: fmt.Errorf("err pkg"),
+				},
+				"A/bar": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/bar": {
+					"B/baz",
+				},
+			},
+		},
+		"transitive err'd package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo":  true, // transitively err'd
+						"A/quux": true,
+					},
+				},
+				"A/foo": {
+					ex: map[string]bool{
+						"C/flugle": true,
+					},
+					in: map[string]bool{
+						"A/bar": true, // err'd
+					},
+				},
+				"A/bar": {
+					err: fmt.Errorf("err pkg"),
+				},
+				"A/quux": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/quux": {
+					"B/baz",
+				},
+			},
+		},
+	}
+
+	for name, fix := range table {
+		out := wmToReach(fix.workmap, fix.basedir)
+		if !reflect.DeepEqual(out, fix.out) {
+			t.Errorf("wmToReach(%q): Did not get expected reach map:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
+		}
+	}
+}
+
+func TestListPackages(t *testing.T) {
+	srcdir := filepath.Join(getwd(t), "_testdata", "src")
+	j := func(s string) string {
+		return filepath.Join(srcdir, s)
+	}
+
+	table := map[string]struct {
+		fileRoot   string
+		importRoot string
+		out        PackageTree
+		err        error
+	}{
+		"empty": {
+			fileRoot:   j("empty"),
+			importRoot: "empty",
+			out: PackageTree{
+				ImportRoot: "empty",
+				Packages: map[string]PackageOrErr{
+					"empty": {
+						Err: &build.NoGoError{
+							Dir: j("empty"),
+						},
+					},
+				},
+			},
+			err: nil,
+		},
+		"code only": {
+			fileRoot:   j("simple"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"impose import path": {
+			fileRoot:   j("simple"),
+			importRoot: "arbitrary",
+			out: PackageTree{
+				ImportRoot: "arbitrary",
+				Packages: map[string]PackageOrErr{
+					"arbitrary": {
+						P: Package{
+							ImportPath:  "arbitrary",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"test only": {
+			fileRoot:   j("t"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports:     []string{},
+							TestImports: []string{
+								"math/rand",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"xtest only": {
+			fileRoot:   j("xt"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports:     []string{},
+							TestImports: []string{
+								"sort",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and test": {
+			fileRoot:   j("simplet"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+							TestImports: []string{
+								"math/rand",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and xtest": {
+			fileRoot:   j("simplext"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+							TestImports: []string{
+								"sort",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code, test, xtest": {
+			fileRoot:   j("simpleallt"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+							TestImports: []string{
+								"math/rand",
+								"sort",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"one pkg multifile": {
+			fileRoot:   j("m1p"),
+			importRoot: "m1p",
+			out: PackageTree{
+				ImportRoot: "m1p",
+				Packages: map[string]PackageOrErr{
+					"m1p": {
+						P: Package{
+							ImportPath:  "m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"one nested below": {
+			fileRoot:   j("nest"),
+			importRoot: "nest",
+			out: PackageTree{
+				ImportRoot: "nest",
+				Packages: map[string]PackageOrErr{
+					"nest": {
+						P: Package{
+							ImportPath:  "nest",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+					"nest/m1p": {
+						P: Package{
+							ImportPath:  "nest/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"two nested under empty root": {
+			fileRoot:   j("ren"),
+			importRoot: "ren",
+			out: PackageTree{
+				ImportRoot: "ren",
+				Packages: map[string]PackageOrErr{
+					"ren": {
+						Err: &build.NoGoError{
+							Dir: j("ren"),
+						},
+					},
+					"ren/m1p": {
+						P: Package{
+							ImportPath:  "ren/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+					"ren/simple": {
+						P: Package{
+							ImportPath:  "ren/simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"internal name mismatch": {
+			fileRoot:   j("doublenest"),
+			importRoot: "doublenest",
+			out: PackageTree{
+				ImportRoot: "doublenest",
+				Packages: map[string]PackageOrErr{
+					"doublenest": {
+						P: Package{
+							ImportPath:  "doublenest",
+							CommentPath: "",
+							Name:        "base",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"go/parser",
+							},
+						},
+					},
+					"doublenest/namemismatch": {
+						P: Package{
+							ImportPath:  "doublenest/namemismatch",
+							CommentPath: "",
+							Name:        "nm",
+							Imports: []string{
+								"github.com/Masterminds/semver",
+								"os",
+							},
+						},
+					},
+					"doublenest/namemismatch/m1p": {
+						P: Package{
+							ImportPath:  "doublenest/namemismatch/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"file and importroot mismatch": {
+			fileRoot:   j("doublenest"),
+			importRoot: "other",
+			out: PackageTree{
+				ImportRoot: "other",
+				Packages: map[string]PackageOrErr{
+					"other": {
+						P: Package{
+							ImportPath:  "other",
+							CommentPath: "",
+							Name:        "base",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"go/parser",
+							},
+						},
+					},
+					"other/namemismatch": {
+						P: Package{
+							ImportPath:  "other/namemismatch",
+							CommentPath: "",
+							Name:        "nm",
+							Imports: []string{
+								"github.com/Masterminds/semver",
+								"os",
+							},
+						},
+					},
+					"other/namemismatch/m1p": {
+						P: Package{
+							ImportPath:  "other/namemismatch/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and ignored main": {
+			fileRoot:   j("igmain"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+								"unicode",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and ignored main with comment leader": {
+			fileRoot:   j("igmainlong"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+								"unicode",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code, tests, and ignored main": {
+			fileRoot:   j("igmaint"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+								"unicode",
+							},
+							TestImports: []string{
+								"math/rand",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"two pkgs": {
+			fileRoot:   j("twopkgs"),
+			importRoot: "twopkgs",
+			out: PackageTree{
+				ImportRoot: "twopkgs",
+				Packages: map[string]PackageOrErr{
+					"twopkgs": {
+						Err: &build.MultiplePackageError{
+							Dir:      j("twopkgs"),
+							Packages: []string{"simple", "m1p"},
+							Files:    []string{"a.go", "b.go"},
+						},
+					},
+				},
+			},
+		},
+		// imports a missing pkg
+		"missing import": {
+			fileRoot:   j("missing"),
+			importRoot: "missing",
+			out: PackageTree{
+				ImportRoot: "missing",
+				Packages: map[string]PackageOrErr{
+					"missing": {
+						P: Package{
+							ImportPath:  "missing",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"missing/missing",
+								"sort",
+							},
+						},
+					},
+					"missing/m1p": {
+						P: Package{
+							ImportPath:  "missing/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		// has disallowed dir names
+		"disallowed dirs": {
+			fileRoot:   j("disallow"),
+			importRoot: "disallow",
+			out: PackageTree{
+				ImportRoot: "disallow",
+				Packages: map[string]PackageOrErr{
+					"disallow": {
+						P: Package{
+							ImportPath:  "disallow",
+							CommentPath: "",
+							Name:        "disallow",
+							Imports: []string{
+								"disallow/testdata",
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+					// disallow/.m1p is ignored by listPackages...for now. Kept
+					// here commented because this might change again...
+					//"disallow/.m1p": {
+					//P: Package{
+					//ImportPath:  "disallow/.m1p",
+					//CommentPath: "",
+					//Name:        "m1p",
+					//Imports: []string{
+					//"github.com/sdboyer/gps",
+					//"os",
+					//"sort",
+					//},
+					//},
+					//},
+					"disallow/testdata": {
+						P: Package{
+							ImportPath:  "disallow/testdata",
+							CommentPath: "",
+							Name:        "testdata",
+							Imports: []string{
+								"hash",
+							},
+						},
+					},
+				},
+			},
+		},
+		// This case mostly exists for the PackageTree methods, but it does
+		// cover a bit of range
+		"varied": {
+			fileRoot:   j("varied"),
+			importRoot: "varied",
+			out: PackageTree{
+				ImportRoot: "varied",
+				Packages: map[string]PackageOrErr{
+					"varied": {
+						P: Package{
+							ImportPath:  "varied",
+							CommentPath: "",
+							Name:        "main",
+							Imports: []string{
+								"net/http",
+								"varied/namemismatch",
+								"varied/otherpath",
+								"varied/simple",
+							},
+						},
+					},
+					"varied/otherpath": {
+						P: Package{
+							ImportPath:  "varied/otherpath",
+							CommentPath: "",
+							Name:        "otherpath",
+							Imports:     []string{},
+							TestImports: []string{
+								"varied/m1p",
+							},
+						},
+					},
+					"varied/simple": {
+						P: Package{
+							ImportPath:  "varied/simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"go/parser",
+								"varied/simple/another",
+							},
+						},
+					},
+					"varied/simple/another": {
+						P: Package{
+							ImportPath:  "varied/simple/another",
+							CommentPath: "",
+							Name:        "another",
+							Imports: []string{
+								"hash",
+								"varied/m1p",
+							},
+							TestImports: []string{
+								"encoding/binary",
+							},
+						},
+					},
+					"varied/namemismatch": {
+						P: Package{
+							ImportPath:  "varied/namemismatch",
+							CommentPath: "",
+							Name:        "nm",
+							Imports: []string{
+								"github.com/Masterminds/semver",
+								"os",
+							},
+						},
+					},
+					"varied/m1p": {
+						P: Package{
+							ImportPath:  "varied/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for name, fix := range table {
+		if _, err := os.Stat(fix.fileRoot); err != nil {
+			t.Errorf("listPackages(%q): error on fileRoot %s: %s", name, fix.fileRoot, err)
+			continue
+		}
+
+		out, err := listPackages(fix.fileRoot, fix.importRoot)
+
+		if err != nil && fix.err == nil {
+			t.Errorf("listPackages(%q): Received error but none expected: %s", name, err)
+		} else if fix.err != nil && err == nil {
+			t.Errorf("listPackages(%q): Error expected but none received", name)
+		} else if fix.err != nil && err != nil {
+			if !reflect.DeepEqual(fix.err, err) {
+				t.Errorf("listPackages(%q): Did not receive expected error:\n\t(GOT): %s\n\t(WNT): %s", name, err, fix.err)
+			}
+		}
+
+		if fix.out.ImportRoot != "" && fix.out.Packages != nil {
+			if !reflect.DeepEqual(out, fix.out) {
+				if fix.out.ImportRoot != out.ImportRoot {
+					t.Errorf("listPackages(%q): Expected ImportRoot %s, got %s", name, fix.out.ImportRoot, out.ImportRoot)
+				}
+
+				// overwrite the out one to see if we still have a real problem
+				out.ImportRoot = fix.out.ImportRoot
+
+				if !reflect.DeepEqual(out, fix.out) {
+					if len(fix.out.Packages) < 2 {
+						t.Errorf("listPackages(%q): Did not get expected PackageOrErrs:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
+					} else {
+						seen := make(map[string]bool)
+						for path, perr := range fix.out.Packages {
+							seen[path] = true
+							if operr, exists := out.Packages[path]; !exists {
+								t.Errorf("listPackages(%q): Expected PackageOrErr for path %s was missing from output:\n\t%s", name, path, perr)
+							} else {
+								if !reflect.DeepEqual(perr, operr) {
+									t.Errorf("listPackages(%q): PkgOrErr for path %s was not as expected:\n\t(GOT): %s\n\t(WNT): %s", name, path, operr, perr)
+								}
+							}
+						}
+
+						for path, operr := range out.Packages {
+							if seen[path] {
+								continue
+							}
+
+							t.Errorf("listPackages(%q): Got PackageOrErr for path %s, but none was expected:\n\t%s", name, path, operr)
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+func TestListExternalImports(t *testing.T) {
+	// There's enough in the 'varied' test case to test most of what matters
+	vptree, err := listPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
+	if err != nil {
+		t.Fatalf("listPackages failed on varied test case: %s", err)
+	}
+
+	var expect []string
+	var name string
+	var ignore map[string]bool
+	var main, tests bool
+
+	validate := func() {
+		result := vptree.ListExternalImports(main, tests, ignore)
+		if !reflect.DeepEqual(expect, result) {
+			t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
+		}
+	}
+
+	all := []string{
+		"encoding/binary",
+		"github.com/Masterminds/semver",
+		"github.com/sdboyer/gps",
+		"go/parser",
+		"hash",
+		"net/http",
+		"os",
+		"sort",
+	}
+
+	// helper to rewrite expect, excepting the given packages
+	//
+	// this makes it easier to see what we're taking out on each test
+	except := func(not ...string) {
+		expect = make([]string, len(all)-len(not))
+
+		drop := make(map[string]bool)
+		for _, npath := range not {
+			drop[npath] = true
+		}
+
+		k := 0
+		for _, path := range all {
+			if !drop[path] {
+				expect[k] = path
+				k++
+			}
+		}
+	}
+
+	// everything on
+	name = "simple"
+	except()
+	main, tests = true, true
+	validate()
+
+	// Now without tests, which should just cut one
+	name = "no tests"
+	tests = false
+	except("encoding/binary")
+	validate()
+
+	// Now skip main, which still just cuts out one
+	name = "no main"
+	main, tests = false, true
+	except("net/http")
+	validate()
+
+	// No test and no main, which should be additive
+	name = "no test, no main"
+	main, tests = false, false
+	except("net/http", "encoding/binary")
+	validate()
+
+	// now, the ignore tests. turn main and tests back on
+	main, tests = true, true
+
+	// start with non-matching
+	name = "non-matching ignore"
+	ignore = map[string]bool{
+		"nomatch": true,
+	}
+	except()
+	validate()
+
+	// should have the same effect as ignoring main
+	name = "ignore the root"
+	ignore = map[string]bool{
+		"varied": true,
+	}
+	except("net/http")
+	validate()
+
+	// now drop a more interesting one
+	name = "ignore simple"
+	ignore = map[string]bool{
+		"varied/simple": true,
+	}
+	// we get github.com/sdboyer/gps from m1p, too, so it should still be there
+	except("go/parser")
+	validate()
+
+	// now drop two
+	name = "ignore simple and namemismatch"
+	ignore = map[string]bool{
+		"varied/simple":       true,
+		"varied/namemismatch": true,
+	}
+	except("go/parser", "github.com/Masterminds/semver")
+	validate()
+
+	// make sure tests and main play nice with ignore
+	name = "ignore simple and namemismatch, and no tests"
+	tests = false
+	except("go/parser", "github.com/Masterminds/semver", "encoding/binary")
+	validate()
+	name = "ignore simple and namemismatch, and no main"
+	main, tests = false, true
+	except("go/parser", "github.com/Masterminds/semver", "net/http")
+	validate()
+	name = "ignore simple and namemismatch, and no main or tests"
+	main, tests = false, false
+	except("go/parser", "github.com/Masterminds/semver", "net/http", "encoding/binary")
+	validate()
+
+	main, tests = true, true
+
+	// ignore two that should knock out gps
+	name = "ignore both importers"
+	ignore = map[string]bool{
+		"varied/simple": true,
+		"varied/m1p":    true,
+	}
+	except("sort", "github.com/sdboyer/gps", "go/parser")
+	validate()
+
+	// finally, directly ignore some external packages
+	name = "ignore external"
+	ignore = map[string]bool{
+		"github.com/sdboyer/gps": true,
+		"go/parser":              true,
+		"sort":                   true,
+	}
+	except("sort", "github.com/sdboyer/gps", "go/parser")
+	validate()
+
+	// The only thing varied *doesn't* cover is disallowed path patterns
+	ptree, err := listPackages(filepath.Join(getwd(t), "_testdata", "src", "disallow"), "disallow")
+	if err != nil {
+		t.Fatalf("listPackages failed on disallow test case: %s", err)
+	}
+
+	result := ptree.ListExternalImports(false, false, nil)
+	expect = []string{"github.com/sdboyer/gps", "hash", "sort"}
+	if !reflect.DeepEqual(expect, result) {
+		t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
+	}
+}
+
+func TestExternalReach(t *testing.T) {
+	// There's enough in the 'varied' test case to test most of what matters
+	vptree, err := listPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
+	if err != nil {
+		t.Fatalf("listPackages failed on varied test case: %s", err)
+	}
+
+	// Set up vars for validate closure
+	var expect map[string][]string
+	var name string
+	var main, tests bool
+	var ignore map[string]bool
+
+	validate := func() {
+		result := vptree.ExternalReach(main, tests, ignore)
+		if !reflect.DeepEqual(expect, result) {
+			seen := make(map[string]bool)
+			for ip, epkgs := range expect {
+				seen[ip] = true
+				if pkgs, exists := result[ip]; !exists {
+					t.Errorf("ver(%q): expected import path %s was not present in result", name, ip)
+				} else {
+					if !reflect.DeepEqual(pkgs, epkgs) {
+						t.Errorf("ver(%q): did not get expected package set for import path %s:\n\t(GOT): %s\n\t(WNT): %s", name, ip, pkgs, epkgs)
+					}
+				}
+			}
+
+			for ip, pkgs := range result {
+				if seen[ip] {
+					continue
+				}
+				t.Errorf("ver(%q): Got packages for import path %s, but none were expected:\n\t%s", name, ip, pkgs)
+			}
+		}
+	}
+
+	all := map[string][]string{
+		"varied":                {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/gps", "go/parser", "hash", "net/http", "os", "sort"},
+		"varied/m1p":            {"github.com/sdboyer/gps", "os", "sort"},
+		"varied/namemismatch":   {"github.com/Masterminds/semver", "os"},
+		"varied/otherpath":      {"github.com/sdboyer/gps", "os", "sort"},
+		"varied/simple":         {"encoding/binary", "github.com/sdboyer/gps", "go/parser", "hash", "os", "sort"},
+		"varied/simple/another": {"encoding/binary", "github.com/sdboyer/gps", "hash", "os", "sort"},
+	}
+	// build a map to validate the exception inputs. do this because this stuff
+	// is hard enough to keep track of that it's preferable not to have silent
+	// success if a typo creeps in and we're trying to except an import that
+	// isn't in a pkg in the first place
+	valid := make(map[string]map[string]bool)
+	for ip, expkgs := range all {
+		m := make(map[string]bool)
+		for _, pkg := range expkgs {
+			m[pkg] = true
+		}
+		valid[ip] = m
+	}
+
+	// helper to compose expect, excepting specific packages
+	//
+	// this makes it easier to see what we're taking out on each test
+	except := func(pkgig ...string) {
+		// reinit expect with everything from all
+		expect = make(map[string][]string)
+		for ip, expkgs := range all {
+			sl := make([]string, len(expkgs))
+			copy(sl, expkgs)
+			expect[ip] = sl
+		}
+
+		// now build the dropmap
+		drop := make(map[string]map[string]bool)
+		for _, igstr := range pkgig {
+			// split on space; first elem is import path to pkg, the rest are
+			// the imports to drop.
+			not := strings.Split(igstr, " ")
+			var ip string
+			ip, not = not[0], not[1:]
+			if _, exists := valid[ip]; !exists {
+				t.Fatalf("%s is not a package name we're working with, doofus", ip)
+			}
+
+			// if only a single elem was passed, though, drop the whole thing
+			if len(not) == 0 {
+				delete(expect, ip)
+				continue
+			}
+
+			m := make(map[string]bool)
+			for _, imp := range not {
+				if !valid[ip][imp] {
+					t.Fatalf("%s is not a reachable import of %s, even in the all case", imp, ip)
+				}
+				m[imp] = true
+			}
+
+			drop[ip] = m
+		}
+
+		for ip, pkgs := range expect {
+			var npkgs []string
+			for _, imp := range pkgs {
+				if !drop[ip][imp] {
+					npkgs = append(npkgs, imp)
+				}
+			}
+
+			expect[ip] = npkgs
+		}
+	}
+
+	// first, validate all
+	name = "all"
+	main, tests = true, true
+	except()
+	validate()
+
+	// turn off main pkgs, which necessarily doesn't affect anything else
+	name = "no main"
+	main = false
+	except("varied")
+	validate()
+
+	// ignoring the "varied" pkg has same effect as disabling main pkgs
+	name = "ignore root"
+	ignore = map[string]bool{
+		"varied": true,
+	}
+	main = true
+	validate()
+
+	// when we drop tests, varied/otherpath loses its link to varied/m1p and
+	// varied/simple/another loses its test import, which has a fairly big
+	// cascade
+	name = "no tests"
+	tests = false
+	ignore = nil
+	except(
+		"varied encoding/binary",
+		"varied/simple encoding/binary",
+		"varied/simple/another encoding/binary",
+		"varied/otherpath github.com/sdboyer/gps os sort",
+	)
+	validate()
+
+	// almost the same as previous, but varied just goes away completely
+	name = "no main or tests"
+	main = false
+	except(
+		"varied",
+		"varied/simple encoding/binary",
+		"varied/simple/another encoding/binary",
+		"varied/otherpath github.com/sdboyer/gps os sort",
+	)
+	validate()
+
+	// focus on ignores now, so reset main and tests
+	main, tests = true, true
+
+	// now, the fun stuff. punch a hole in the middle by cutting out
+	// varied/simple
+	name = "ignore varied/simple"
+	ignore = map[string]bool{
+		"varied/simple": true,
+	}
+	except(
+		// root pkg loses on everything in varied/simple/another
+		"varied hash encoding/binary go/parser",
+		"varied/simple",
+	)
+	validate()
+
+	// widen the hole by excluding otherpath
+	name = "ignore varied/{otherpath,simple}"
+	ignore = map[string]bool{
+		"varied/otherpath": true,
+		"varied/simple":    true,
+	}
+	except(
+		// root pkg loses on everything in varied/simple/another and varied/m1p
+		"varied hash encoding/binary go/parser github.com/sdboyer/gps sort",
+		"varied/otherpath",
+		"varied/simple",
+	)
+	validate()
+
+	// remove namemismatch, though we're mostly beating a dead horse now
+	name = "ignore varied/{otherpath,simple,namemismatch}"
+	ignore["varied/namemismatch"] = true
+	except(
+		// root pkg loses on everything in varied/simple/another and varied/m1p
+		"varied hash encoding/binary go/parser github.com/sdboyer/gps sort os github.com/Masterminds/semver",
+		"varied/otherpath",
+		"varied/simple",
+		"varied/namemismatch",
+	)
+	validate()
+}
+
+func getwd(t *testing.T) string {
+	cwd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return cwd
+}
diff --git a/vendor/github.com/sdboyer/gps/appveyor.yml b/vendor/github.com/sdboyer/gps/appveyor.yml
new file mode 100644
index 0000000..9bf23a3
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/appveyor.yml
@@ -0,0 +1,24 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\sdboyer\gps
+shallow_clone: true
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+install:
+  - go version
+  - go env
+  - choco install bzr hg
+  - set PATH=C:\Program Files (x86)\Bazaar\;C:\Program Files\Mercurial\;%PATH%
+build_script:
+  - go get github.com/Masterminds/glide
+  - C:\gopath\bin\glide install
+
+test_script:
+  - go test
+
+deploy: off
diff --git a/vendor/github.com/sdboyer/gps/bridge.go b/vendor/github.com/sdboyer/gps/bridge.go
new file mode 100644
index 0000000..8b26e6b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/bridge.go
@@ -0,0 +1,627 @@
+package gps
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+)
+
+// sourceBridge is an adapter around a SourceManager that tailors operations
+// for a single solve run.
+type sourceBridge interface {
+	getProjectInfo(pa atom) (Manifest, Lock, error)
+	listVersions(id ProjectIdentifier) ([]Version, error)
+	listPackages(id ProjectIdentifier, v Version) (PackageTree, error)
+	computeRootReach() ([]string, error)
+	revisionPresentIn(id ProjectIdentifier, r Revision) (bool, error)
+	pairRevision(id ProjectIdentifier, r Revision) []Version
+	pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion
+	repoExists(id ProjectIdentifier) (bool, error)
+	vendorCodeExists(id ProjectIdentifier) (bool, error)
+	matches(id ProjectIdentifier, c Constraint, v Version) bool
+	matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool
+	intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint
+	verifyRootDir(path string) error
+	deduceRemoteRepo(path string) (*remoteRepo, error)
+}
+
+// bridge is an adapter around a proper SourceManager. It provides localized
+// caching that's tailored to the requirements of a particular solve run.
+//
+// It also performs transformations between ProjectIdentifiers, which is what
+// the solver primarily deals in, and ProjectRoot, which is what the
+// SourceManager primarily deals in. This separation is helpful because it keeps
+// the complexities of deciding what a particular name "means" entirely within
+// the solver, while the SourceManager can traffic exclusively in
+// globally-unique network names.
+//
+// Finally, it provides authoritative version/constraint operations, ensuring
+// that any possible approach to a match - even those not literally encoded in
+// the inputs - is achieved.
+type bridge struct {
+	// The underlying, adapted-to SourceManager
+	sm SourceManager
+
+	// The solver which we're assisting.
+	//
+	// The link between solver and bridge is circular, which is typically a bit
+	// awkward, but the bridge needs access to so many of the input arguments
+	// held by the solver that it ends up being easier and saner to do this.
+	s *solver
+
+	// Simple, local cache of the root's PackageTree
+	crp *struct {
+		ptree PackageTree
+		err   error
+	}
+
+	// Map of project root name to their available version list. This cache is
+	// layered on top of the proper SourceManager's cache; the only difference
+	// is that this keeps the versions sorted in the direction required by the
+	// current solve run
+	vlists map[ProjectRoot][]Version
+}
+
+// Global factory func to create a bridge. This exists solely to allow tests to
+// override it with a custom bridge and sm.
+var mkBridge func(*solver, SourceManager) sourceBridge = func(s *solver, sm SourceManager) sourceBridge {
+	return &bridge{
+		sm:     sm,
+		s:      s,
+		vlists: make(map[ProjectRoot][]Version),
+	}
+}
+
+func (b *bridge) getProjectInfo(pa atom) (Manifest, Lock, error) {
+	if pa.id.ProjectRoot == b.s.params.ImportRoot {
+		return b.s.rm, b.s.rl, nil
+	}
+	return b.sm.GetProjectInfo(ProjectRoot(pa.id.netName()), pa.v)
+}
+
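+// key returns the cache key for an identifier: its NetworkName if one is set,
+// and its ProjectRoot otherwise.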
+func (b *bridge) key(id ProjectIdentifier) ProjectRoot {
+	k := ProjectRoot(id.NetworkName)
+	if k == "" {
+		k = id.ProjectRoot
+	}
+
+	return k
+}
+
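+// listVersions returns the available versions for id, consulting this solve
+// run's local cache first; fresh results from the SourceManager are sorted in
+// the direction the current solve run requires (downgrade or upgrade) before
+// being cached.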
+func (b *bridge) listVersions(id ProjectIdentifier) ([]Version, error) {
+	k := b.key(id)
+
+	if vl, exists := b.vlists[k]; exists {
+		return vl, nil
+	}
+
+	vl, err := b.sm.ListVersions(k)
+	// TODO(sdboyer) cache errors, too?
+	if err != nil {
+		return nil, err
+	}
+
+	if b.s.params.Downgrade {
+		sort.Sort(downgradeVersionSorter(vl))
+	} else {
+		sort.Sort(upgradeVersionSorter(vl))
+	}
+
+	b.vlists[k] = vl
+	return vl, nil
+}
+
+func (b *bridge) revisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
+	k := b.key(id)
+	return b.sm.RevisionPresentIn(k, r)
+}
+
+func (b *bridge) repoExists(id ProjectIdentifier) (bool, error) {
+	k := b.key(id)
+	return b.sm.RepoExists(k)
+}
+
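+// vendorCodeExists reports whether a vendor directory for the project exists
+// under the solve run's root directory.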
+func (b *bridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
+	fi, err := os.Stat(filepath.Join(b.s.params.RootDir, "vendor", string(id.ProjectRoot)))
+	if err != nil {
+		return false, err
+	} else if fi.IsDir() {
+		return true, nil
+	}
+
+	return false, nil
+}
+
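+// pairVersion scans id's version list for a PairedVersion that matches the
+// given unpaired version, returning it if found and nil otherwise.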
+func (b *bridge) pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion {
+	vl, err := b.listVersions(id)
+	if err != nil {
+		return nil
+	}
+
+	// doing it like this is a bit sloppy
+	for _, v2 := range vl {
+		if p, ok := v2.(PairedVersion); ok {
+			if p.Matches(v) {
+				return p
+			}
+		}
+	}
+
+	return nil
+}
+
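+// pairRevision returns the given revision together with any versions in id's
+// version list that are paired with that revision.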
+func (b *bridge) pairRevision(id ProjectIdentifier, r Revision) []Version {
+	vl, err := b.listVersions(id)
+	if err != nil {
+		return nil
+	}
+
+	p := []Version{r}
+	// doing it like this is a bit sloppy
+	for _, v2 := range vl {
+		if pv, ok := v2.(PairedVersion); ok {
+			if pv.Matches(r) {
+				p = append(p, pv)
+			}
+		}
+	}
+
+	return p
+}
+
+// matches performs a typical match check between the provided version and
+// constraint. If that basic check fails and the provided version is incomplete
+// (e.g. an unpaired version or bare revision), it will attempt to gather more
+// information on one or the other and re-perform the comparison.
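+//
+// For example (illustrative): an unpaired version "v1.0.0" can match a bare
+// Revision constraint if pairing it against the project's version list shows
+// that "v1.0.0" shares that same underlying revision.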
+func (b *bridge) matches(id ProjectIdentifier, c2 Constraint, v Version) bool {
+	if c2.Matches(v) {
+		return true
+	}
+
+	// There's a wide field of possible ways that pairing might result in a
+	// match. For each possible type of version, start by carving out all the
+	// cases where the constraint would have provided an authoritative match
+	// result.
+	switch tv := v.(type) {
+	case PairedVersion:
+		switch tc := c2.(type) {
+		case PairedVersion, Revision, noneConstraint:
+			// These three would all have been authoritative matches
+			return false
+		case UnpairedVersion:
+			// Only way paired and unpaired could match is if they share an
+			// underlying rev
+			pv := b.pairVersion(id, tc)
+			if pv == nil {
+				return false
+			}
+			return pv.Matches(v)
+		case semverConstraint:
+			// Have to check all the possible versions for that rev to see if
+			// any match the semver constraint
+			for _, pv := range b.pairRevision(id, tv.Underlying()) {
+				if tc.Matches(pv) {
+					return true
+				}
+			}
+			return false
+		}
+
+	case Revision:
+		switch tc := c2.(type) {
+		case PairedVersion, Revision, noneConstraint:
+			// These three would all have been authoritative matches
+			return false
+		case UnpairedVersion:
+			// Only way paired and unpaired could match is if they share an
+			// underlying rev
+			pv := b.pairVersion(id, tc)
+			if pv == nil {
+				return false
+			}
+			return pv.Matches(v)
+		case semverConstraint:
+			// Have to check all the possible versions for the rev to see if
+			// any match the semver constraint
+			for _, pv := range b.pairRevision(id, tv) {
+				if tc.Matches(pv) {
+					return true
+				}
+			}
+			return false
+		}
+
+	// UnpairedVersion input has the weirdest cases. It's also the one we'll
+	// probably see least often.
+	case UnpairedVersion:
+		switch tc := c2.(type) {
+		case noneConstraint:
+			// obviously
+			return false
+		case Revision, PairedVersion:
+			// Easy case for both - just pair the uv and see if it matches the revision
+			// constraint
+			pv := b.pairVersion(id, tv)
+			if pv == nil {
+				return false
+			}
+			return tc.Matches(pv)
+		case UnpairedVersion:
+			// Both are unpaired versions. See if they share an underlying rev.
+			pv := b.pairVersion(id, tv)
+			if pv == nil {
+				return false
+			}
+
+			pc := b.pairVersion(id, tc)
+			if pc == nil {
+				return false
+			}
+			return pc.Matches(pv)
+
+		case semverConstraint:
+			// semverConstraint can't ever match a rev, but we do need to check
+			// if any other versions corresponding to this rev work.
+			pv := b.pairVersion(id, tv)
+			if pv == nil {
+				return false
+			}
+
+			for _, ttv := range b.pairRevision(id, pv.Underlying()) {
+				if c2.Matches(ttv) {
+					return true
+				}
+			}
+			return false
+		}
+	default:
+		panic("unreachable")
+	}
+
+	return false
+}
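+
+// A concrete sketch of the pairing fallback (hypothetical rev values):
+// assuming branch "master" pairs to Revision("abc123") for this id,
+//
+//	b.matches(id, NewBranch("master"), Revision("abc123")) // true, via pairing
+//	b.matches(id, NewBranch("master"), Revision("zzz999")) // false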
+
+// matchesAny is the authoritative version of Constraint.MatchesAny.
+func (b *bridge) matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool {
+	if c1.MatchesAny(c2) {
+		return true
+	}
+
+	// This approach is slightly wasteful, but just SO much less verbose, and
+	// more easily understood.
+	var uc1, uc2 Constraint
+	if v1, ok := c1.(Version); ok {
+		uc1 = b.vtu(id, v1)
+	} else {
+		uc1 = c1
+	}
+
+	if v2, ok := c2.(Version); ok {
+		uc2 = b.vtu(id, v2)
+	} else {
+		uc2 = c2
+	}
+
+	return uc1.MatchesAny(uc2)
+}
+
+// intersect is the authoritative version of Constraint.Intersect.
+func (b *bridge) intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint {
+	rc := c1.Intersect(c2)
+	if rc != none {
+		return rc
+	}
+
+	// This approach is slightly wasteful, but just SO much less verbose, and
+	// more easily understood.
+	var uc1, uc2 Constraint
+	if v1, ok := c1.(Version); ok {
+		uc1 = b.vtu(id, v1)
+	} else {
+		uc1 = c1
+	}
+
+	if v2, ok := c2.(Version); ok {
+		uc2 = b.vtu(id, v2)
+	} else {
+		uc2 = c2
+	}
+
+	return uc1.Intersect(uc2)
+}
+
+// vtu creates a versionTypeUnion for the provided version.
+//
+// This union may (and typically will) end up being nothing more than the single
+// input version, but creating a versionTypeUnion guarantees that 'local'
+// constraint checks (direct method calls) are authoritative.
+func (b *bridge) vtu(id ProjectIdentifier, v Version) versionTypeUnion {
+	switch tv := v.(type) {
+	case Revision:
+		return versionTypeUnion(b.pairRevision(id, tv))
+	case PairedVersion:
+		return versionTypeUnion(b.pairRevision(id, tv.Underlying()))
+	case UnpairedVersion:
+		pv := b.pairVersion(id, tv)
+		if pv == nil {
+			return versionTypeUnion{tv}
+		}
+
+		return versionTypeUnion(b.pairRevision(id, pv.Underlying()))
+	}
+
+	return nil
+}
+
+// computeRootReach is a specialized, less stringent version of listExternal
+// that allows for a bit of fuzziness in the source inputs.
+//
+// Specifically, we need to:
+//  - Analyze test-type files as well as typical source files
+//  - Make a best-effort attempt even if the code doesn't compile
+//  - Include main packages in the analysis
+//
+// Perhaps most important is that we don't want to have the results of this
+// analysis be in any permanent cache, and we want to read directly from our
+// potentially messy root project source location on disk. Together, this means
+// that we can't ask the real SourceManager to do it.
+func (b *bridge) computeRootReach() ([]string, error) {
+	// TODO(sdboyer) I no longer remember why I thought being less stringent in
+	// the analysis was OK. So, for now, we just compute a bog-standard list of
+	// externally-touched packages, including mains and tests.
+	ptree, err := b.listRootPackages()
+	if err != nil {
+		return nil, err
+	}
+
+	return ptree.ListExternalImports(true, true, b.s.ig), nil
+}
+
+func (b *bridge) listRootPackages() (PackageTree, error) {
+	if b.crp == nil {
+		ptree, err := listPackages(b.s.params.RootDir, string(b.s.params.ImportRoot))
+
+		b.crp = &struct {
+			ptree PackageTree
+			err   error
+		}{
+			ptree: ptree,
+			err:   err,
+		}
+	}
+	if b.crp.err != nil {
+		return PackageTree{}, b.crp.err
+	}
+
+	return b.crp.ptree, nil
+}
+
+// listPackages lists all the packages contained within the given project at a
+// particular version.
+//
+// The root project is handled separately, as the source manager isn't
+// responsible for that code.
+func (b *bridge) listPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	if id.ProjectRoot == b.s.params.ImportRoot {
+		return b.listRootPackages()
+	}
+
+	// FIXME if we're aliasing here, the returned PackageTree will have
+	// unaliased import paths, which is super not correct
+	return b.sm.ListPackages(b.key(id), v)
+}
+
+// verifyRootDir ensures that the provided path to the project root is in good
+// working condition. This check is made only once, at the beginning of a solve
+// run.
+func (b *bridge) verifyRootDir(path string) error {
+	if fi, err := os.Stat(path); err != nil {
+		return badOptsFailure(fmt.Sprintf("could not read project root (%s): %s", path, err))
+	} else if !fi.IsDir() {
+		return badOptsFailure(fmt.Sprintf("project root (%s) is a file, not a directory", path))
+	}
+
+	return nil
+}
+
+// deduceRemoteRepo deduces certain network-oriented properties about an import
+// path.
+func (b *bridge) deduceRemoteRepo(path string) (*remoteRepo, error) {
+	return deduceRemoteRepo(path)
+}
+
+// versionTypeUnion represents a set of versions that are, within the scope of
+// this solver run, equivalent.
+//
+// The simple case here is just a pair - a normal version plus its underlying
+// revision - but if a tag or branch point at the same rev, then we consider
+// them equivalent. Again, however, this equivalency is short-lived; it must be
+// re-assessed during every solver run.
+//
+// The union members are treated as being OR'd together:  all constraint
+// operations attempt each member, and will take the most open/optimistic
+// answer.
+//
+// This technically does allow tags to match branches - something we
+// otherwise try hard to avoid - but because the original input constraint never
+// actually changes (and is never written out in the Result), there's no harmful
+// case of a user suddenly riding a branch when they expected a fixed tag.
+type versionTypeUnion []Version
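+
+// For example, if branch "master" and tag "v1.0.0" both point at rev "abc123"
+// (hypothetical values), the union the solver works with looks like:
+//
+//	versionTypeUnion{
+//		NewBranch("master").Is(Revision("abc123")),
+//		NewVersion("v1.0.0").Is(Revision("abc123")),
+//		Revision("abc123"),
+//	}
+//
+// A constraint check that succeeds against any one member succeeds against the
+// union as a whole.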
+
+// This should generally not be called, but is required for the interface. If it
+// is called, we have a bigger problem (the type has escaped the solver); thus,
+// panic.
+func (av versionTypeUnion) String() string {
+	panic("versionTypeUnion should never be turned into a string; it is solver internal-only")
+}
+
+// This should generally not be called, but is required for the interface. If it
+// is called, we have a bigger problem (the type has escaped the solver); thus,
+// panic.
+func (av versionTypeUnion) Type() string {
+	panic("versionTypeUnion should never need to answer a Type() call; it is solver internal-only")
+}
+
+// Matches takes a version, and returns true if that version matches any version
+// contained in the union.
+//
+// This DOES allow tags to match branches, albeit indirectly through a revision.
+func (av versionTypeUnion) Matches(v Version) bool {
+	av2, oav := v.(versionTypeUnion)
+
+	for _, v1 := range av {
+		if oav {
+			for _, v2 := range av2 {
+				if v1.Matches(v2) {
+					return true
+				}
+			}
+		} else if v1.Matches(v) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// MatchesAny returns true if any of the contained versions (which are also
+// constraints) in the union successfully MatchAny with the provided
+// constraint.
+func (av versionTypeUnion) MatchesAny(c Constraint) bool {
+	av2, oav := c.(versionTypeUnion)
+
+	for _, v1 := range av {
+		if oav {
+			for _, v2 := range av2 {
+				if v1.MatchesAny(v2) {
+					return true
+				}
+			}
+		} else if v1.MatchesAny(c) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Intersect takes a constraint, and attempts to intersect it with all the
+// versions contained in the union until one returns non-none. If that never
+// happens, then none is returned.
+//
+// In order to avoid weird version floating elsewhere in the solver, the union
+// always returns the input constraint. (This is probably obviously correct, but
+// is still worth noting.)
+func (av versionTypeUnion) Intersect(c Constraint) Constraint {
+	av2, oav := c.(versionTypeUnion)
+
+	for _, v1 := range av {
+		if oav {
+			for _, v2 := range av2 {
+				if rc := v1.Intersect(v2); rc != none {
+					return rc
+				}
+			}
+		} else if rc := v1.Intersect(c); rc != none {
+			return rc
+		}
+	}
+
+	return none
+}
+
+func (av versionTypeUnion) _private() {}
+
+type upgradeVersionSorter []Version
+type downgradeVersionSorter []Version
+
+func (vs upgradeVersionSorter) Len() int {
+	return len(vs)
+}
+
+func (vs upgradeVersionSorter) Swap(i, j int) {
+	vs[i], vs[j] = vs[j], vs[i]
+}
+
+func (vs downgradeVersionSorter) Len() int {
+	return len(vs)
+}
+
+func (vs downgradeVersionSorter) Swap(i, j int) {
+	vs[i], vs[j] = vs[j], vs[i]
+}
+
+func (vs upgradeVersionSorter) Less(i, j int) bool {
+	l, r := vs[i], vs[j]
+
+	if tl, ispair := l.(versionPair); ispair {
+		l = tl.v
+	}
+	if tr, ispair := r.(versionPair); ispair {
+		r = tr.v
+	}
+
+	switch compareVersionType(l, r) {
+	case -1:
+		return true
+	case 1:
+		return false
+	case 0:
+		break
+	default:
+		panic("unreachable")
+	}
+
+	switch l.(type) {
+	// For these, now nothing to do but alpha sort
+	case Revision, branchVersion, plainVersion:
+		return l.String() < r.String()
+	}
+
+	// This ensures that pre-release versions are always sorted after ALL
+	// full-release versions
+	lsv, rsv := l.(semVersion).sv, r.(semVersion).sv
+	lpre, rpre := lsv.Prerelease() == "", rsv.Prerelease() == ""
+	if (lpre && !rpre) || (!lpre && rpre) {
+		return lpre
+	}
+	return lsv.GreaterThan(rsv)
+}
+
+func (vs downgradeVersionSorter) Less(i, j int) bool {
+	l, r := vs[i], vs[j]
+
+	if tl, ispair := l.(versionPair); ispair {
+		l = tl.v
+	}
+	if tr, ispair := r.(versionPair); ispair {
+		r = tr.v
+	}
+
+	switch compareVersionType(l, r) {
+	case -1:
+		return true
+	case 1:
+		return false
+	case 0:
+		break
+	default:
+		panic("unreachable")
+	}
+
+	switch l.(type) {
+	// For these, now nothing to do but alpha sort
+	case Revision, branchVersion, plainVersion:
+		return l.String() < r.String()
+	}
+
+	// This ensures that pre-release versions are always sorted after ALL
+	// full-release versions
+	lsv, rsv := l.(semVersion).sv, r.(semVersion).sv
+	lpre, rpre := lsv.Prerelease() == "", rsv.Prerelease() == ""
+	if (lpre && !rpre) || (!lpre && rpre) {
+		return lpre
+	}
+	return lsv.LessThan(rsv)
+}
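+
+// Sketch of the two resulting orders for the same input (all semver versions):
+//
+//	vl := []Version{NewVersion("1.0.0"), NewVersion("1.1.0"), NewVersion("1.1.0-beta.1")}
+//	sort.Sort(upgradeVersionSorter(vl))   // 1.1.0, 1.0.0, 1.1.0-beta.1
+//	sort.Sort(downgradeVersionSorter(vl)) // 1.0.0, 1.1.0, 1.1.0-beta.1
+//
+// In both directions, pre-release versions sort after all full releases.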
diff --git a/vendor/github.com/sdboyer/gps/circle.yml b/vendor/github.com/sdboyer/gps/circle.yml
new file mode 100644
index 0000000..5723c35
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/circle.yml
@@ -0,0 +1,19 @@
+machine:
+    environment:
+        GO15VENDOREXPERIMENT: 1
+checkout:
+    post:
+dependencies:
+    override:
+        - mkdir -pv $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME
+        - ln -Tsf $HOME/$CIRCLE_PROJECT_REPONAME $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
+        # Glide 0.10.1
+        - wget https://github.com/Masterminds/glide/releases/download/0.10.1/glide-0.10.1-linux-amd64.tar.gz
+        - tar -vxz -C $HOME/bin --strip=1 -f glide-0.10.1-linux-amd64.tar.gz
+        # Fetch deps with glide
+        - glide --home $HOME/.glide -y glide.yaml install --cache
+    cache_directories:
+        - "~/.glide"
+test:
+    override:
+        - cd $HOME/.go_workspace/src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME && go test
diff --git a/vendor/github.com/sdboyer/gps/constraint_test.go b/vendor/github.com/sdboyer/gps/constraint_test.go
new file mode 100644
index 0000000..3863e65
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/constraint_test.go
@@ -0,0 +1,817 @@
+package gps
+
+import (
+	"fmt"
+	"testing"
+)
+
+// gu is a helper func for stringifying what we assume is a PairedVersion
+// (otherwise it will panic), given as a Constraint.
+func gu(v Constraint) string {
+	return fmt.Sprintf("%q at rev %q", v, v.(PairedVersion).Underlying())
+}
+
+func TestBranchConstraintOps(t *testing.T) {
+	v1 := NewBranch("master").(branchVersion)
+	v2 := NewBranch("test").(branchVersion)
+
+	if !v1.MatchesAny(any) {
+		t.Errorf("Branches should always match the any constraint")
+	}
+	if v1.Intersect(any) != v1 {
+		t.Errorf("Branches should always return self when intersecting the any constraint, but got %s", v1.Intersect(any))
+	}
+
+	if v1.MatchesAny(none) {
+		t.Errorf("Branches should never match the none constraint")
+	}
+	if v1.Intersect(none) != none {
+		t.Errorf("Branches should always return none when intersecting the none constraint, but got %s", v1.Intersect(none))
+	}
+
+	if v1.Matches(v2) {
+		t.Errorf("%s should not match %s", v1, v2)
+	}
+
+	if v1.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v1, v2)
+	}
+
+	if v1.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v1, v2)
+	}
+
+	// Add rev to one
+	snuffster := Revision("snuffleupagus")
+	v3 := v1.Is(snuffster).(versionPair)
+	if v2.Matches(v3) {
+		t.Errorf("%s should not match %s", v2, gu(v3))
+	}
+	if v3.Matches(v2) {
+		t.Errorf("%s should not match %s", gu(v3), v2)
+	}
+
+	if v2.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+	if v3.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+
+	if v2.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3))
+	}
+	if v3.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2)
+	}
+
+	// Add different rev to the other
+	v4 := v2.Is(Revision("cookie monster")).(versionPair)
+	if v4.Matches(v3) {
+		t.Errorf("%s should not match %s", gu(v4), gu(v3))
+	}
+	if v3.Matches(v4) {
+		t.Errorf("%s should not match %s", gu(v3), gu(v4))
+	}
+
+	if v4.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+	if v3.MatchesAny(v4) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+
+	if v4.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3))
+	}
+	if v3.Intersect(v4) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4))
+	}
+
+	// Now add same rev to different branches
+	// TODO(sdboyer) this might not actually be a good idea, when you consider the
+	// semantics of floating versions...matching on an underlying rev might be
+	// nice in the short term, but it's probably the wrong behavior most of the time
+	v5 := v2.Is(Revision("snuffleupagus")).(versionPair)
+	if !v5.Matches(v3) {
+		t.Errorf("%s should match %s", gu(v5), gu(v3))
+	}
+	if !v3.Matches(v5) {
+		t.Errorf("%s should match %s", gu(v3), gu(v5))
+	}
+
+	if !v5.MatchesAny(v3) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+	if !v3.MatchesAny(v5) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+
+	if v5.Intersect(v3) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3))
+	}
+	if v3.Intersect(v5) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5))
+	}
+
+	// Set up for cross-type constraint ops
+	cookie := Revision("cookie monster")
+	o1 := NewVersion("master").(plainVersion)
+	o2 := NewVersion("1.0.0").(semVersion)
+	o3 := o1.Is(cookie).(versionPair)
+	o4 := o2.Is(cookie).(versionPair)
+	v6 := v1.Is(cookie).(versionPair)
+
+	if v1.Matches(o1) {
+		t.Errorf("%s (branch) should not match %s (version) across types", v1, o1)
+	}
+
+	if v1.MatchesAny(o1) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (version)", v1, o1)
+	}
+
+	if v1.Intersect(o1) != none {
+		t.Errorf("Intersection of %s (branch) with %s (version) should result in empty set", v1, o1)
+	}
+
+	if v1.Matches(o2) {
+		t.Errorf("%s (branch) should not match %s (semver) across types", v1, o2)
+	}
+
+	if v1.MatchesAny(o2) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (semver)", v1, o2)
+	}
+
+	if v1.Intersect(o2) != none {
+		t.Errorf("Intersection of %s (branch) with %s (semver) should result in empty set", v1, o2)
+	}
+
+	if v1.Matches(o3) {
+		t.Errorf("%s (branch) should not match %s (version) across types", v1, gu(o3))
+	}
+
+	if v1.MatchesAny(o3) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (version)", v1, gu(o3))
+	}
+
+	if v1.Intersect(o3) != none {
+		t.Errorf("Intersection of %s (branch) with %s (version) should result in empty set", v1, gu(o3))
+	}
+
+	if v1.Matches(o4) {
+		t.Errorf("%s (branch) should not match %s (semver) across types", v1, gu(o4))
+	}
+
+	if v1.MatchesAny(o4) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (semver)", v1, gu(o4))
+	}
+
+	if v1.Intersect(o4) != none {
+		t.Errorf("Intersection of %s (branch) with %s (semver) should result in empty set", v1, gu(o4))
+	}
+
+	if !v6.Matches(o3) {
+		t.Errorf("%s (branch) should match %s (version) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if !v6.MatchesAny(o3) {
+		t.Errorf("%s (branch) should allow some matches when combined with %s (version) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if v6.Intersect(o3) != cookie {
+		t.Errorf("Intersection of %s (branch) with %s (version) should return shared underlying rev", gu(v6), gu(o3))
+	}
+
+	if !v6.Matches(o4) {
+		t.Errorf("%s (branch) should match %s (version) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if !v6.MatchesAny(o4) {
+		t.Errorf("%s (branch) should allow some matches when combined with %s (version) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if v6.Intersect(o4) != cookie {
+		t.Errorf("Intersection of %s (branch) with %s (version) should return shared underlying rev", gu(v6), gu(o4))
+	}
+}
+
+func TestVersionConstraintOps(t *testing.T) {
+	v1 := NewVersion("ab123").(plainVersion)
+	v2 := NewVersion("b2a13").(plainVersion)
+
+	if !v1.MatchesAny(any) {
+		t.Errorf("Versions should always match the any constraint")
+	}
+	if v1.Intersect(any) != v1 {
+		t.Errorf("Versions should always return self when intersecting the any constraint, but got %s", v1.Intersect(any))
+	}
+
+	if v1.MatchesAny(none) {
+		t.Errorf("Versions should never match the none constraint")
+	}
+	if v1.Intersect(none) != none {
+		t.Errorf("Versions should always return none when intersecting the none constraint, but got %s", v1.Intersect(none))
+	}
+
+	if v1.Matches(v2) {
+		t.Errorf("%s should not match %s", v1, v2)
+	}
+
+	if v1.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v1, v2)
+	}
+
+	if v1.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v1, v2)
+	}
+
+	// Add rev to one
+	snuffster := Revision("snuffleupagus")
+	v3 := v1.Is(snuffster).(versionPair)
+	if v2.Matches(v3) {
+		t.Errorf("%s should not match %s", v2, gu(v3))
+	}
+	if v3.Matches(v2) {
+		t.Errorf("%s should not match %s", gu(v3), v2)
+	}
+
+	if v2.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+	if v3.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+
+	if v2.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3))
+	}
+	if v3.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2)
+	}
+
+	// Add different rev to the other
+	v4 := v2.Is(Revision("cookie monster")).(versionPair)
+	if v4.Matches(v3) {
+		t.Errorf("%s should not match %s", gu(v4), gu(v3))
+	}
+	if v3.Matches(v4) {
+		t.Errorf("%s should not match %s", gu(v3), gu(v4))
+	}
+
+	if v4.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+	if v3.MatchesAny(v4) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+
+	if v4.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3))
+	}
+	if v3.Intersect(v4) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4))
+	}
+
+	// Now add same rev to different versions, and things should line up
+	v5 := v2.Is(Revision("snuffleupagus")).(versionPair)
+	if !v5.Matches(v3) {
+		t.Errorf("%s should match %s", gu(v5), gu(v3))
+	}
+	if !v3.Matches(v5) {
+		t.Errorf("%s should match %s", gu(v3), gu(v5))
+	}
+
+	if !v5.MatchesAny(v3) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+	if !v3.MatchesAny(v5) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+
+	if v5.Intersect(v3) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3))
+	}
+	if v3.Intersect(v5) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5))
+	}
+
+	// Set up for cross-type constraint ops
+	cookie := Revision("cookie monster")
+	o1 := NewBranch("master").(branchVersion)
+	o2 := NewVersion("1.0.0").(semVersion)
+	o3 := o1.Is(cookie).(versionPair)
+	o4 := o2.Is(cookie).(versionPair)
+	v6 := v1.Is(cookie).(versionPair)
+
+	if v1.Matches(o1) {
+		t.Errorf("%s (version) should not match %s (branch) across types", v1, o1)
+	}
+
+	if v1.MatchesAny(o1) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (branch)", v1, o1)
+	}
+
+	if v1.Intersect(o1) != none {
+		t.Errorf("Intersection of %s (version) with %s (branch) should result in empty set", v1, o1)
+	}
+
+	if v1.Matches(o2) {
+		t.Errorf("%s (version) should not match %s (semver) across types", v1, o2)
+	}
+
+	if v1.MatchesAny(o2) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (semver)", v1, o2)
+	}
+
+	if v1.Intersect(o2) != none {
+		t.Errorf("Intersection of %s (version) with %s (semver) should result in empty set", v1, o2)
+	}
+
+	if v1.Matches(o3) {
+		t.Errorf("%s (version) should not match %s (branch) across types", v1, gu(o3))
+	}
+
+	if v1.MatchesAny(o3) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (branch)", v1, gu(o3))
+	}
+
+	if v1.Intersect(o3) != none {
+		t.Errorf("Intersection of %s (version) with %s (branch) should result in empty set", v1, gu(o3))
+	}
+
+	if v1.Matches(o4) {
+		t.Errorf("%s (version) should not match %s (semver) across types", v1, gu(o4))
+	}
+
+	if v1.MatchesAny(o4) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (semver)", v1, gu(o4))
+	}
+
+	if v1.Intersect(o4) != none {
+		t.Errorf("Intersection of %s (version) with %s (semver) should result in empty set", v1, gu(o4))
+	}
+
+	if !v6.Matches(o3) {
+		t.Errorf("%s (version) should match %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if !v6.MatchesAny(o3) {
+		t.Errorf("%s (version) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if v6.Intersect(o3) != cookie {
+		t.Errorf("Intersection of %s (version) with %s (branch) should return shared underlying rev", gu(v6), gu(o3))
+	}
+
+	if !v6.Matches(o4) {
+		t.Errorf("%s (version) should match %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if !v6.MatchesAny(o4) {
+		t.Errorf("%s (version) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if v6.Intersect(o4) != cookie {
+		t.Errorf("Intersection of %s (version) with %s (branch) should return shared underlying rev", gu(v6), gu(o4))
+	}
+}
+
+func TestSemverVersionConstraintOps(t *testing.T) {
+	v1 := NewVersion("1.0.0").(semVersion)
+	v2 := NewVersion("2.0.0").(semVersion)
+
+	if !v1.MatchesAny(any) {
+		t.Errorf("Semvers should always match the any constraint")
+	}
+	if v1.Intersect(any) != v1 {
+		t.Errorf("Semvers should always return self when intersecting the any constraint, but got %s", v1.Intersect(any))
+	}
+
+	if v1.MatchesAny(none) {
+		t.Errorf("Semvers should never match the none constraint")
+	}
+	if v1.Intersect(none) != none {
+		t.Errorf("Semvers should always return none when intersecting the none constraint, but got %s", v1.Intersect(none))
+	}
+
+	if v1.Matches(v2) {
+		t.Errorf("%s should not match %s", v1, v2)
+	}
+
+	if v1.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v1, v2)
+	}
+
+	if v1.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v1, v2)
+	}
+
+	// Add rev to one
+	snuffster := Revision("snuffleupagus")
+	v3 := v1.Is(snuffster).(versionPair)
+	if v2.Matches(v3) {
+		t.Errorf("%s should not match %s", v2, gu(v3))
+	}
+	if v3.Matches(v2) {
+		t.Errorf("%s should not match %s", gu(v3), v2)
+	}
+
+	if v2.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+	if v3.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+
+	if v2.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3))
+	}
+	if v3.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2)
+	}
+
+	// Add different rev to the other
+	v4 := v2.Is(Revision("cookie monster")).(versionPair)
+	if v4.Matches(v3) {
+		t.Errorf("%s should not match %s", gu(v4), gu(v3))
+	}
+	if v3.Matches(v4) {
+		t.Errorf("%s should not match %s", gu(v3), gu(v4))
+	}
+
+	if v4.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+	if v3.MatchesAny(v4) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+
+	if v4.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3))
+	}
+	if v3.Intersect(v4) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4))
+	}
+
+	// Now add same rev to different versions, and things should line up
+	v5 := v2.Is(Revision("snuffleupagus")).(versionPair)
+	if !v5.Matches(v3) {
+		t.Errorf("%s should match %s", gu(v5), gu(v3))
+	}
+	if !v3.Matches(v5) {
+		t.Errorf("%s should match %s", gu(v3), gu(v5))
+	}
+
+	if !v5.MatchesAny(v3) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+	if !v3.MatchesAny(v5) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+
+	if v5.Intersect(v3) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3))
+	}
+	if v3.Intersect(v5) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5))
+	}
+
+	// Set up for cross-type constraint ops
+	cookie := Revision("cookie monster")
+	o1 := NewBranch("master").(branchVersion)
+	o2 := NewVersion("ab123").(plainVersion)
+	o3 := o1.Is(cookie).(versionPair)
+	o4 := o2.Is(cookie).(versionPair)
+	v6 := v1.Is(cookie).(versionPair)
+
+	if v1.Matches(o1) {
+		t.Errorf("%s (semver) should not match %s (branch) across types", v1, o1)
+	}
+
+	if v1.MatchesAny(o1) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (branch)", v1, o1)
+	}
+
+	if v1.Intersect(o1) != none {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should result in empty set", v1, o1)
+	}
+
+	if v1.Matches(o2) {
+		t.Errorf("%s (semver) should not match %s (version) across types", v1, o2)
+	}
+
+	if v1.MatchesAny(o2) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (version)", v1, o2)
+	}
+
+	if v1.Intersect(o2) != none {
+		t.Errorf("Intersection of %s (semver) with %s (version) should result in empty set", v1, o2)
+	}
+
+	if v1.Matches(o3) {
+		t.Errorf("%s (semver) should not match %s (branch) across types", v1, gu(o3))
+	}
+
+	if v1.MatchesAny(o3) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (branch)", v1, gu(o3))
+	}
+
+	if v1.Intersect(o3) != none {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should result in empty set", v1, gu(o3))
+	}
+
+	if v1.Matches(o4) {
+		t.Errorf("%s (semver) should not match %s (version) across types", v1, gu(o4))
+	}
+
+	if v1.MatchesAny(o4) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (version)", v1, gu(o4))
+	}
+
+	if v1.Intersect(o4) != none {
+		t.Errorf("Intersection of %s (semver) with %s (version) should result in empty set", v1, gu(o4))
+	}
+
+	if !v6.Matches(o3) {
+		t.Errorf("%s (semver) should match %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if !v6.MatchesAny(o3) {
+		t.Errorf("%s (semver) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if v6.Intersect(o3) != cookie {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should return shared underlying rev", gu(v6), gu(o3))
+	}
+
+	if !v6.Matches(o4) {
+		t.Errorf("%s (semver) should match %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if !v6.MatchesAny(o4) {
+		t.Errorf("%s (semver) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if v6.Intersect(o4) != cookie {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should return shared underlying rev", gu(v6), gu(o4))
+	}
+
+	// Regression check - make sure that semVersion -> semverConstraint works
+	// the same as verified in the other test
+	c1, _ := NewSemverConstraint("=1.0.0")
+	if !v1.MatchesAny(c1) {
+		t.Errorf("%s (semver) should allow some matches - itself - when combined with an equivalent semverConstraint", gu(v1))
+	}
+	if v1.Intersect(c1) != v1 {
+		t.Errorf("Intersection of %s (semver) with equivalent semver constraint should return self, got %s", gu(v1), v1.Intersect(c1))
+	}
+
+	if !v6.MatchesAny(c1) {
+		t.Errorf("%s (semver pair) should allow some matches - itself - when combined with an equivalent semverConstraint", gu(v6))
+	}
+	if v6.Intersect(c1) != v6 {
+		t.Errorf("Intersection of %s (semver pair) with equivalent semver constraint should return self, got %s", gu(v6), v6.Intersect(c1))
+	}
+
+}
+
+// The other test covers semVersion; this one covers semverConstraint.
+func TestSemverConstraintOps(t *testing.T) {
+	v1 := NewBranch("master").(branchVersion)
+	v2 := NewVersion("ab123").(plainVersion)
+	v3 := NewVersion("1.0.0").(semVersion)
+
+	fozzie := Revision("fozzie bear")
+	v4 := v1.Is(fozzie).(versionPair)
+	v5 := v2.Is(fozzie).(versionPair)
+	v6 := v3.Is(fozzie).(versionPair)
+
+	// TODO(sdboyer) we can't use the same range as below b/c semver.rangeConstraint is
+	// still an incomparable type
+	c1, err := NewSemverConstraint("=1.0.0")
+	if err != nil {
+		t.Errorf("Failed to create constraint: %s", err)
+		t.FailNow()
+	}
+
+	if !c1.MatchesAny(any) {
+		t.Errorf("Semver constraints should always match the any constraint")
+	}
+	if c1.Intersect(any) != c1 {
+		t.Errorf("Semver constraints should always return self when intersecting the any constraint, but got %s", c1.Intersect(any))
+	}
+
+	if c1.MatchesAny(none) {
+		t.Errorf("Semver constraints should never match the none constraint")
+	}
+	if c1.Intersect(none) != none {
+		t.Errorf("Semver constraints should always return none when intersecting the none constraint, but got %s", c1.Intersect(none))
+	}
+
+	c1, err = NewSemverConstraint(">= 1.0.0")
+	if err != nil {
+		t.Errorf("Failed to create constraint: %s", err)
+		t.FailNow()
+	}
+
+	if c1.Matches(v1) {
+		t.Errorf("Semver constraint should not match simple branch")
+	}
+	if c1.Matches(v2) {
+		t.Errorf("Semver constraint should not match simple version")
+	}
+	if !c1.Matches(v3) {
+		t.Errorf("Semver constraint should match a simple semver version in its range")
+	}
+	if c1.Matches(v4) {
+		t.Errorf("Semver constraint should not match paired branch")
+	}
+	if c1.Matches(v5) {
+		t.Errorf("Semver constraint should not match paired version")
+	}
+	if !c1.Matches(v6) {
+		t.Errorf("Semver constraint should match a paired semver version in its range")
+	}
+
+	if c1.MatchesAny(v1) {
+		t.Errorf("Semver constraint should not allow any when intersected with simple branch")
+	}
+	if c1.MatchesAny(v2) {
+		t.Errorf("Semver constraint should not allow any when intersected with simple version")
+	}
+	if !c1.MatchesAny(v3) {
+		t.Errorf("Semver constraint should allow some when intersected with a simple semver version in its range")
+	}
+	if c1.MatchesAny(v4) {
+		t.Errorf("Semver constraint should not allow any when intersected with paired branch")
+	}
+	if c1.MatchesAny(v5) {
+		t.Errorf("Semver constraint should not allow any when intersected with paired version")
+	}
+	if !c1.MatchesAny(v6) {
+		t.Errorf("Semver constraint should allow some when intersected with a paired semver version in its range")
+	}
+
+	if c1.Intersect(v1) != none {
+		t.Errorf("Semver constraint should return none when intersected with a simple branch")
+	}
+	if c1.Intersect(v2) != none {
+		t.Errorf("Semver constraint should return none when intersected with a simple version")
+	}
+	if c1.Intersect(v3) != v3 {
+		t.Errorf("Semver constraint should return input when intersected with a simple semver version in its range")
+	}
+	if c1.Intersect(v4) != none {
+		t.Errorf("Semver constraint should return none when intersected with a paired branch")
+	}
+	if c1.Intersect(v5) != none {
+		t.Errorf("Semver constraint should return none when intersected with a paired version")
+	}
+	if c1.Intersect(v6) != v6 {
+		t.Errorf("Semver constraint should return input when intersected with a paired semver version in its range")
+	}
+}
+
+// Test that certain types of cross-version comparisons work when they are
+// expressed as a version union (but that others don't).
+func TestVersionUnion(t *testing.T) {
+	rev := Revision("flooboofoobooo")
+	v1 := NewBranch("master")
+	v2 := NewBranch("test")
+	v3 := NewVersion("1.0.0").Is(rev)
+	v4 := NewVersion("1.0.1")
+	v5 := NewVersion("v2.0.5").Is(Revision("notamatch"))
+
+	uv1 := versionTypeUnion{v1, v4, rev}
+
+	if uv1.MatchesAny(none) {
+		t.Errorf("Union can't match none")
+	}
+	if none.MatchesAny(uv1) {
+		t.Errorf("Union can't match none")
+	}
+
+	if !uv1.MatchesAny(any) {
+		t.Errorf("Union must match any")
+	}
+	if !any.MatchesAny(uv1) {
+		t.Errorf("Union must match any")
+	}
+
+	// Basic matching
+	if !uv1.Matches(v4) {
+		t.Errorf("Union should match on branch to branch")
+	}
+	if !v4.Matches(uv1) {
+		t.Errorf("Union should reverse-match on branch to branch")
+	}
+
+	if !uv1.Matches(v3) {
+		t.Errorf("Union should match on rev to paired rev")
+	}
+	if !v3.Matches(uv1) {
+		t.Errorf("Union should reverse-match on rev to paired rev")
+	}
+
+	if uv1.Matches(v2) {
+		t.Errorf("Union should not match on anything in disjoint unpaired")
+	}
+	if v2.Matches(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint unpaired")
+	}
+
+	if uv1.Matches(v5) {
+		t.Errorf("Union should not match on anything in disjoint pair")
+	}
+	if v5.Matches(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint pair")
+	}
+
+	// MatchesAny - repeat Matches for safety, but add more, too
+	if !uv1.MatchesAny(v4) {
+		t.Errorf("Union should match on branch to branch")
+	}
+	if !v4.MatchesAny(uv1) {
+		t.Errorf("Union should reverse-match on branch to branch")
+	}
+
+	if !uv1.MatchesAny(v3) {
+		t.Errorf("Union should match on rev to paired rev")
+	}
+	if !v3.MatchesAny(uv1) {
+		t.Errorf("Union should reverse-match on rev to paired rev")
+	}
+
+	if uv1.MatchesAny(v2) {
+		t.Errorf("Union should not match on anything in disjoint unpaired")
+	}
+	if v2.MatchesAny(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint unpaired")
+	}
+
+	if uv1.MatchesAny(v5) {
+		t.Errorf("Union should not match on anything in disjoint pair")
+	}
+	if v5.MatchesAny(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint pair")
+	}
+
+	c1, _ := NewSemverConstraint("~1.0.0")
+	c2, _ := NewSemverConstraint("~2.0.0")
+	if !uv1.MatchesAny(c1) {
+		t.Errorf("Union should have some overlap due to containing 1.0.1 version")
+	}
+	if !c1.MatchesAny(uv1) {
+		t.Errorf("Union should have some overlap due to containing 1.0.1 version")
+	}
+
+	if uv1.MatchesAny(c2) {
+		t.Errorf("Union should have no overlap with ~2.0.0 semver range")
+	}
+	if c2.MatchesAny(uv1) {
+		t.Errorf("Union should have no overlap with ~2.0.0 semver range")
+	}
+
+	// Intersect - repeat all previous
+	if uv1.Intersect(v4) != v4 {
+		t.Errorf("Union intersection on contained version should return that version")
+	}
+	if v4.Intersect(uv1) != v4 {
+		t.Errorf("Union reverse-intersection on contained version should return that version")
+	}
+
+	if uv1.Intersect(v3) != rev {
+		t.Errorf("Union intersection on paired version w/matching rev should return rev, got %s", uv1.Intersect(v3))
+	}
+	if v3.Intersect(uv1) != rev {
+		t.Errorf("Union reverse-intersection on paired version w/matching rev should return rev, got %s", v3.Intersect(uv1))
+	}
+
+	if uv1.Intersect(v2) != none {
+		t.Errorf("Union should not intersect with anything in disjoint unpaired")
+	}
+	if v2.Intersect(uv1) != none {
+		t.Errorf("Union should not reverse-intersect with anything in disjoint unpaired")
+	}
+
+	if uv1.Intersect(v5) != none {
+		t.Errorf("Union should not intersect with anything in disjoint pair")
+	}
+	if v5.Intersect(uv1) != none {
+		t.Errorf("Union should not reverse-intersect with anything in disjoint pair")
+	}
+
+	if uv1.Intersect(c1) != v4 {
+		t.Errorf("Union intersecting with semver range should return 1.0.1 version, got %s", uv1.Intersect(c1))
+	}
+	if c1.Intersect(uv1) != v4 {
+		t.Errorf("Union reverse-intersecting with semver range should return 1.0.1 version, got %s", c1.Intersect(uv1))
+	}
+
+	if uv1.Intersect(c2) != none {
+		t.Errorf("Union intersecting with non-overlapping semver range should return none, got %s", uv1.Intersect(c2))
+	}
+	if c2.Intersect(uv1) != none {
+		t.Errorf("Union reverse-intersecting with non-overlapping semver range should return none, got %s", uv1.Intersect(c2))
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/constraints.go b/vendor/github.com/sdboyer/gps/constraints.go
new file mode 100644
index 0000000..43b8b09
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/constraints.go
@@ -0,0 +1,166 @@
+package gps
+
+import (
+	"fmt"
+
+	"github.com/Masterminds/semver"
+)
+
+var (
+	none = noneConstraint{}
+	any  = anyConstraint{}
+)
+
+// A Constraint provides structured limitations on the versions that are
+// admissible for a given project.
+//
+// As with Version, it has a private method because gps's internal
+// implementation of the problem is complete, and the system relies on type
+// magic to operate.
+type Constraint interface {
+	fmt.Stringer
+	// Matches indicates if the provided Version is allowed by the Constraint.
+	Matches(Version) bool
+	// MatchesAny indicates if the intersection of the Constraint with the
+	// provided Constraint would yield a Constraint that could allow *any*
+	// Version.
+	MatchesAny(Constraint) bool
+	// Intersect computes the intersection of the Constraint with the provided
+	// Constraint.
+	Intersect(Constraint) Constraint
+	_private()
+}
+
+func (semverConstraint) _private() {}
+func (anyConstraint) _private()    {}
+func (noneConstraint) _private()   {}
+
+// NewSemverConstraint attempts to construct a semver Constraint object from the
+// input string.
+//
+// If the input string cannot be made into a valid semver Constraint, an error
+// is returned.
+func NewSemverConstraint(body string) (Constraint, error) {
+	c, err := semver.NewConstraint(body)
+	if err != nil {
+		return nil, err
+	}
+	return semverConstraint{c: c}, nil
+}
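+
+// For example (constraint syntax per the Masterminds/semver package):
+//
+//	c, err := NewSemverConstraint(">= 1.0.0, < 2.0.0")
+//	if err != nil {
+//		// handle malformed constraint string
+//	}
+//	c.Matches(NewVersion("1.4.0")) // true
+//	c.Matches(NewVersion("2.0.0")) // false
+//	c.Matches(NewBranch("master")) // false - branches never match semver ranges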
+
+type semverConstraint struct {
+	c semver.Constraint
+}
+
+func (c semverConstraint) String() string {
+	return c.c.String()
+}
+
+func (c semverConstraint) Matches(v Version) bool {
+	switch tv := v.(type) {
+	case versionTypeUnion:
+		for _, elem := range tv {
+			if c.Matches(elem) {
+				return true
+			}
+		}
+	case semVersion:
+		return c.c.Matches(tv.sv) == nil
+	case versionPair:
+		if tv2, ok := tv.v.(semVersion); ok {
+			return c.c.Matches(tv2.sv) == nil
+		}
+	}
+
+	return false
+}
+
+func (c semverConstraint) MatchesAny(c2 Constraint) bool {
+	return c.Intersect(c2) != none
+}
+
+func (c semverConstraint) Intersect(c2 Constraint) Constraint {
+	switch tc := c2.(type) {
+	case anyConstraint:
+		return c
+	case versionTypeUnion:
+		for _, elem := range tc {
+			if rc := c.Intersect(elem); rc != none {
+				return rc
+			}
+		}
+	case semverConstraint:
+		rc := c.c.Intersect(tc.c)
+		if !semver.IsNone(rc) {
+			return semverConstraint{c: rc}
+		}
+	case semVersion:
+		rc := c.c.Intersect(tc.sv)
+		if !semver.IsNone(rc) {
+			// If single version intersected with constraint, we know the result
+			// must be the single version, so just return it back out
+			return c2
+		}
+	case versionPair:
+		if tc2, ok := tc.v.(semVersion); ok {
+			rc := c.c.Intersect(tc2.sv)
+			if !semver.IsNone(rc) {
+				// same reasoning as previous case
+				return c2
+			}
+		}
+	}
+
+	return none
+}
+
+// IsAny indicates if the provided constraint is the wildcard "Any" constraint.
+func IsAny(c Constraint) bool {
+	_, ok := c.(anyConstraint)
+	return ok
+}
+
+// Any returns a constraint that will match anything.
+func Any() Constraint {
+	return anyConstraint{}
+}
+
+// anyConstraint is an unbounded constraint - it matches all other types of
+// constraints. It mirrors the behavior of the semver package's any type.
+type anyConstraint struct{}
+
+func (anyConstraint) String() string {
+	return "*"
+}
+
+func (anyConstraint) Matches(Version) bool {
+	return true
+}
+
+func (anyConstraint) MatchesAny(Constraint) bool {
+	return true
+}
+
+func (anyConstraint) Intersect(c Constraint) Constraint {
+	return c
+}
+
+// noneConstraint is the empty set - it matches no versions. It mirrors the
+// behavior of the semver package's none type.
+type noneConstraint struct{}
+
+func (noneConstraint) String() string {
+	return ""
+}
+
+func (noneConstraint) Matches(Version) bool {
+	return false
+}
+
+func (noneConstraint) MatchesAny(Constraint) bool {
+	return false
+}
+
+func (noneConstraint) Intersect(Constraint) Constraint {
+	return none
+}
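+
+// Taken together, any and none act as the identity and absorbing elements of
+// Intersect (sketch):
+//
+//	any.Intersect(c)  // == c, for every Constraint c
+//	none.Intersect(c) // == none, for every Constraint c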
diff --git a/vendor/github.com/sdboyer/gps/discovery.go b/vendor/github.com/sdboyer/gps/discovery.go
new file mode 100644
index 0000000..8da4a66
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/discovery.go
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gps
+
+// This code is taken from cmd/go/discovery.go; it is the logic go get itself
+// uses to interpret meta imports information.
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error which is printed by go get, so the user can find why the package
+// wasn't downloaded if the encoding is not supported. Note that, in
+// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
+// greater than 0x7f are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+	switch strings.ToLower(charset) {
+	case "ascii":
+		return input, nil
+	default:
+		return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+	}
+}
+
+type metaImport struct {
+	Prefix, VCS, RepoRoot string
+}
+
+// parseMetaGoImports returns meta imports from the HTML in r.
+// Parsing ends at the end of the <head> section or the beginning of the <body>.
+func parseMetaGoImports(r io.Reader) (imports []metaImport, err error) {
+	d := xml.NewDecoder(r)
+	d.CharsetReader = charsetReader
+	d.Strict = false
+	var t xml.Token
+	for {
+		t, err = d.RawToken()
+		if err != nil {
+			if err == io.EOF || len(imports) > 0 {
+				err = nil
+			}
+			return
+		}
+		if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+			return
+		}
+		if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+			return
+		}
+		e, ok := t.(xml.StartElement)
+		if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+			continue
+		}
+		if attrValue(e.Attr, "name") != "go-import" {
+			continue
+		}
+		if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 {
+			imports = append(imports, metaImport{
+				Prefix:   f[0],
+				VCS:      f[1],
+				RepoRoot: f[2],
+			})
+		}
+	}
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// `name', or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+	for _, a := range attrs {
+		if strings.EqualFold(a.Name.Local, name) {
+			return a.Value
+		}
+	}
+	return ""
+}
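+
+// Sketch of typical use against a vanity-import page (hypothetical content):
+//
+//	page := `<html><head>
+//	<meta name="go-import" content="example.org/pkg git https://git.example.org/pkg">
+//	</head><body></body></html>`
+//
+//	imports, _ := parseMetaGoImports(strings.NewReader(page))
+//	// imports[0] == metaImport{Prefix: "example.org/pkg", VCS: "git",
+//	//	RepoRoot: "https://git.example.org/pkg"}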
diff --git a/vendor/github.com/sdboyer/gps/errors.go b/vendor/github.com/sdboyer/gps/errors.go
new file mode 100644
index 0000000..26c8413
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/errors.go
@@ -0,0 +1,405 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+)
+
+type errorLevel uint8
+
+// TODO(sdboyer) consistent, sensible way of handling 'type' and 'severity' - or figure
+// out that they're not orthogonal and collapse into just 'type'
+
+const (
+	warning errorLevel = 1 << iota
+	mustResolve
+	cannotResolve
+)
+
+type traceError interface {
+	traceString() string
+}
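+
+// Sketch of the consumption pattern (mirroring noVersionError.traceString
+// below): the solver's trace path prefers the compact form when available.
+//
+//	if te, ok := err.(traceError); ok {
+//		logger.Print(te.traceString())
+//	} else {
+//		logger.Print(err.Error())
+//	}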
+
+type solveError struct {
+	lvl errorLevel
+	msg string
+}
+
+func newSolveError(msg string, lvl errorLevel) error {
+	return &solveError{msg: msg, lvl: lvl}
+}
+
+func (e *solveError) Error() string {
+	return e.msg
+}
+
+type noVersionError struct {
+	pn    ProjectIdentifier
+	fails []failedVersion
+}
+
+func (e *noVersionError) Error() string {
+	if len(e.fails) == 0 {
+		return fmt.Sprintf("No versions found for project %q.", e.pn.ProjectRoot)
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
+	for _, f := range e.fails {
+		fmt.Fprintf(&buf, "\n\t%s: %s", f.v, f.f.Error())
+	}
+
+	return buf.String()
+}
+
+func (e *noVersionError) traceString() string {
+	if len(e.fails) == 0 {
+		return fmt.Sprintf("No versions found")
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
+	for _, f := range e.fails {
+		if te, ok := f.f.(traceError); ok {
+			fmt.Fprintf(&buf, "\n  %s: %s", f.v, te.traceString())
+		} else {
+			fmt.Fprintf(&buf, "\n  %s: %s", f.v, f.f.Error())
+		}
+	}
+
+	return buf.String()
+}
+
+type disjointConstraintFailure struct {
+	goal      dependency
+	failsib   []dependency
+	nofailsib []dependency
+	c         Constraint
+}
+
+func (e *disjointConstraintFailure) Error() string {
+	if len(e.failsib) == 1 {
+		str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which has no overlap with existing constraint %s from %s at %s"
+		return fmt.Sprintf(str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String(), e.failsib[0].dep.Constraint.String(), e.failsib[0].depender.id.errString(), e.failsib[0].depender.v)
+	}
+
+	var buf bytes.Buffer
+
+	var sibs []dependency
+	if len(e.failsib) > 1 {
+		sibs = e.failsib
+
+		str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which has no overlap with the following existing constraints:\n"
+		fmt.Fprintf(&buf, str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
+	} else {
+		sibs = e.nofailsib
+
+		str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which does not overlap with the intersection of existing constraints from other currently selected packages:\n"
+		fmt.Fprintf(&buf, str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
+	}
+
+	for _, c := range sibs {
+		fmt.Fprintf(&buf, "\t%s from %s at %s\n", c.dep.Constraint.String(), c.depender.id.errString(), c.depender.v)
+	}
+
+	return buf.String()
+}
+
+func (e *disjointConstraintFailure) traceString() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "constraint %s on %s disjoint with other dependers:\n", e.goal.dep.Constraint.String(), e.goal.dep.Ident.errString())
+	for _, f := range e.failsib {
+		fmt.Fprintf(&buf, "%s from %s at %s (no overlap)\n", f.dep.Constraint.String(), f.depender.id.ProjectRoot, f.depender.v)
+	}
+	for _, f := range e.nofailsib {
+		fmt.Fprintf(&buf, "%s from %s at %s (some overlap)\n", f.dep.Constraint.String(), f.depender.id.ProjectRoot, f.depender.v)
+	}
+
+	return buf.String()
+}
+
+// Indicates that an atom could not be introduced because one of its dep
+// constraints does not admit the currently-selected version of the target
+// project.
+type constraintNotAllowedFailure struct {
+	goal dependency
+	v    Version
+}
+
+func (e *constraintNotAllowedFailure) Error() string {
+	str := "Could not introduce %s at %s, as it has a dependency on %s with constraint %s, which does not allow the currently selected version of %s"
+	return fmt.Sprintf(str, e.goal.depender.id.errString(), e.goal.depender.v, e.goal.dep.Ident.errString(), e.goal.dep.Constraint, e.v)
+}
+
+func (e *constraintNotAllowedFailure) traceString() string {
+	str := "%s at %s depends on %s with %s, but that's already selected at %s"
+	return fmt.Sprintf(str, e.goal.depender.id.ProjectRoot, e.goal.depender.v, e.goal.dep.Ident.ProjectRoot, e.goal.dep.Constraint, e.v)
+}
+
+type versionNotAllowedFailure struct {
+	goal       atom
+	failparent []dependency
+	c          Constraint
+}
+
+func (e *versionNotAllowedFailure) Error() string {
+	if len(e.failparent) == 1 {
+		str := "Could not introduce %s at %s, as it is not allowed by constraint %s from project %s."
+		return fmt.Sprintf(str, e.goal.id.errString(), e.goal.v, e.failparent[0].dep.Constraint.String(), e.failparent[0].depender.id.errString())
+	}
+
+	var buf bytes.Buffer
+
+	str := "Could not introduce %s at %s, as it is not allowed by constraints from the following projects:\n"
+	fmt.Fprintf(&buf, str, e.goal.id.errString(), e.goal.v)
+
+	for _, f := range e.failparent {
+		fmt.Fprintf(&buf, "\t%s from %s at %s\n", f.dep.Constraint.String(), f.depender.id.errString(), f.depender.v)
+	}
+
+	return buf.String()
+}
+
+func (e *versionNotAllowedFailure) traceString() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%s at %s not allowed by constraint %s:\n", e.goal.id.ProjectRoot, e.goal.v, e.c.String())
+	for _, f := range e.failparent {
+		fmt.Fprintf(&buf, "  %s from %s at %s\n", f.dep.Constraint.String(), f.depender.id.ProjectRoot, f.depender.v)
+	}
+
+	return buf.String()
+}
+
+type missingSourceFailure struct {
+	goal ProjectIdentifier
+	prob string
+}
+
+func (e *missingSourceFailure) Error() string {
+	return fmt.Sprintf(e.prob, e.goal)
+}
+
+type badOptsFailure string
+
+func (e badOptsFailure) Error() string {
+	return string(e)
+}
+
+type sourceMismatchFailure struct {
+	shared            ProjectRoot
+	sel               []dependency
+	current, mismatch string
+	prob              atom
+}
+
+func (e *sourceMismatchFailure) Error() string {
+	var cur []string
+	for _, c := range e.sel {
+		cur = append(cur, string(c.depender.id.ProjectRoot))
+	}
+
+	str := "Could not introduce %s at %s, as it depends on %s from %s, but %s is already marked as coming from %s by %s"
+	return fmt.Sprintf(str, e.prob.id.errString(), e.prob.v, e.shared, e.mismatch, e.shared, e.current, strings.Join(cur, ", "))
+}
+
+func (e *sourceMismatchFailure) traceString() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "disagreement on network addr for %s:\n", e.shared)
+
+	fmt.Fprintf(&buf, "  %s from %s\n", e.mismatch, e.prob.id.errString())
+	for _, dep := range e.sel {
+		fmt.Fprintf(&buf, "  %s from %s\n", e.current, dep.depender.id.errString())
+	}
+
+	return buf.String()
+}
+
+type errDeppers struct {
+	err     error
+	deppers []atom
+}
+type checkeeHasProblemPackagesFailure struct {
+	goal    atom
+	failpkg map[string]errDeppers
+}
+
+func (e *checkeeHasProblemPackagesFailure) Error() string {
+	var buf bytes.Buffer
+	indent := ""
+
+	if len(e.failpkg) > 1 {
+		indent = "\t"
+		fmt.Fprintf(
+			&buf, "Could not introduce %s at %s due to multiple problematic subpackages:\n",
+			e.goal.id.errString(),
+			e.goal.v,
+		)
+	}
+
+	for pkg, errdep := range e.failpkg {
+		var cause string
+		if errdep.err == nil {
+			cause = "is missing"
+		} else {
+			cause = fmt.Sprintf("does not contain usable Go code (%T).", errdep.err)
+		}
+
+		if len(e.failpkg) == 1 {
+			fmt.Fprintf(
+				&buf, "Could not introduce %s at %s, as its subpackage %s %s.",
+				e.goal.id.errString(),
+				e.goal.v,
+				pkg,
+				cause,
+			)
+		} else {
+			fmt.Fprintf(&buf, "\tSubpackage %s %s.", pkg, cause)
+		}
+
+		if len(errdep.deppers) == 1 {
+			fmt.Fprintf(
+				&buf, " (Package is required by %s at %s.)",
+				errdep.deppers[0].id.errString(),
+				errdep.deppers[0].v,
+			)
+		} else {
+			fmt.Fprintf(&buf, " Package is required by:")
+			for _, pa := range errdep.deppers {
+				fmt.Fprintf(&buf, "\n%s\t%s at %s", indent, pa.id.errString(), pa.v)
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+func (e *checkeeHasProblemPackagesFailure) traceString() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%s at %s has problem subpkg(s):\n", e.goal.id.ProjectRoot, e.goal.v)
+	for pkg, errdep := range e.failpkg {
+		if errdep.err == nil {
+			fmt.Fprintf(&buf, "\t%s is missing; ", pkg)
+		} else {
+			fmt.Fprintf(&buf, "\t%s has err (%T); ", pkg, errdep.err)
+		}
+
+		if len(errdep.deppers) == 1 {
+			fmt.Fprintf(
+				&buf, "required by %s at %s.",
+				errdep.deppers[0].id.errString(),
+				errdep.deppers[0].v,
+			)
+		} else {
+			fmt.Fprintf(&buf, " required by:")
+			for _, pa := range errdep.deppers {
+				fmt.Fprintf(&buf, "\n\t\t%s at %s", pa.id.errString(), pa.v)
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+type depHasProblemPackagesFailure struct {
+	goal dependency
+	v    Version
+	pl   []string
+	prob map[string]error
+}
+
+func (e *depHasProblemPackagesFailure) Error() string {
+	fcause := func(pkg string) string {
+		var cause string
+		if err, has := e.prob[pkg]; has {
+			cause = fmt.Sprintf("does not contain usable Go code (%T).", err)
+		} else {
+			cause = "is missing."
+		}
+		return cause
+	}
+
+	if len(e.pl) == 1 {
+		return fmt.Sprintf(
+			"Could not introduce %s at %s, as it requires package %s from %s, but in version %s that package %s",
+			e.goal.depender.id.errString(),
+			e.goal.depender.v,
+			e.pl[0],
+			e.goal.dep.Ident.errString(),
+			e.v,
+			fcause(e.pl[0]),
+		)
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(
+		&buf, "Could not introduce %s at %s, as it requires problematic packages from %s (current version %s):",
+		e.goal.depender.id.errString(),
+		e.goal.depender.v,
+		e.goal.dep.Ident.errString(),
+		e.v,
+	)
+
+	for _, pkg := range e.pl {
+		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
+	}
+
+	return buf.String()
+}
+
+func (e *depHasProblemPackagesFailure) traceString() string {
+	var buf bytes.Buffer
+	fcause := func(pkg string) string {
+		var cause string
+		if err, has := e.prob[pkg]; has {
+			cause = fmt.Sprintf("has parsing err (%T).", err)
+		} else {
+			cause = "is missing"
+		}
+		return cause
+	}
+
+	fmt.Fprintf(
+		&buf, "%s at %s depping on %s at %s has problem subpkg(s):",
+		e.goal.depender.id.errString(),
+		e.goal.depender.v,
+		e.goal.dep.Ident.errString(),
+		e.v,
+	)
+
+	for _, pkg := range e.pl {
+		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
+	}
+
+	return buf.String()
+}
+
+// nonexistentRevisionFailure indicates that a revision constraint was specified
+// for a given project, but that the revision does not exist in the source
+// repository.
+type nonexistentRevisionFailure struct {
+	goal dependency
+	r    Revision
+}
+
+func (e *nonexistentRevisionFailure) Error() string {
+	return fmt.Sprintf(
+		"Could not introduce %s at %s, as it requires %s at revision %s, but that revision does not exist",
+		e.goal.depender.id.errString(),
+		e.goal.depender.v,
+		e.goal.dep.Ident.errString(),
+		e.r,
+	)
+}
+
+func (e *nonexistentRevisionFailure) traceString() string {
+	return fmt.Sprintf(
+		"%s at %s wants missing rev %s of %s",
+		e.goal.depender.id.errString(),
+		e.goal.depender.v,
+		e.r,
+		e.goal.dep.Ident.errString(),
+	)
+}
diff --git a/vendor/github.com/sdboyer/gps/example.go b/vendor/github.com/sdboyer/gps/example.go
new file mode 100644
index 0000000..1a5a31a
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/example.go
@@ -0,0 +1,58 @@
+// +build ignore
+
+package main
+
+import (
+	"go/build"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	gps "github.com/sdboyer/gps"
+)
+
+// This is probably the simplest possible implementation of gps. It does the
+// substantive work that `go get` does, except:
+//  1. It drops the resulting tree into vendor instead of GOPATH
+//  2. It prefers semver tags (if available) over branches
+//  3. It removes any vendor directories nested within dependencies
+//
+//  This will compile and work...and then blow away the vendor directory present
+//  in the cwd, if any. Be careful!
+func main() {
+	// Operate on the current directory
+	root, _ := os.Getwd()
+	// Assume the current directory is correctly placed on a GOPATH, and derive
+	// the ProjectRoot from it
+	srcprefix := filepath.Join(build.Default.GOPATH, "src") + string(filepath.Separator)
+	importroot := filepath.ToSlash(strings.TrimPrefix(root, srcprefix))
+
+	// Set up params, including tracing
+	params := gps.SolveParameters{
+		RootDir:     root,
+		ImportRoot:  gps.ProjectRoot(importroot),
+		Trace:       true,
+		TraceLogger: log.New(os.Stdout, "", 0),
+	}
+
+	// Set up a SourceManager with the NaiveAnalyzer
+	sourcemgr, _ := gps.NewSourceManager(NaiveAnalyzer{}, ".repocache", false)
+	defer sourcemgr.Release()
+
+	// Prep and run the solver
+	solver, _ := gps.Prepare(params, sourcemgr)
+	solution, err := solver.Solve()
+	if err == nil {
+		// If no failure, blow away the vendor dir and write a new one out,
+		// stripping nested vendor directories as we go.
+		os.RemoveAll(filepath.Join(root, "vendor"))
+		gps.CreateVendorTree(filepath.Join(root, "vendor"), solution, sourcemgr, true)
+	}
+}
+
+type NaiveAnalyzer struct{}
+
+func (a NaiveAnalyzer) GetInfo(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) {
+	return nil, nil, nil
+}
diff --git a/vendor/github.com/sdboyer/gps/flags.go b/vendor/github.com/sdboyer/gps/flags.go
new file mode 100644
index 0000000..a7172c1
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/flags.go
@@ -0,0 +1,37 @@
+package gps
+
+// projectExistence values represent the extent to which a project "exists."
+type projectExistence uint8
+
+const (
+	// existsInVendorRoot indicates that a project exists in a vendor directory
+	// at the predictable location based on import path. It does NOT imply, much
+	// less guarantee, any of the following:
+	//   - That the code at the expected location under vendor is at the version
+	//   given in a lock file
+	//   - That the code at the expected location under vendor is from the
+	//   expected upstream project at all
+	//   - That, if this flag is not present, the project does not exist at some
+	//   unexpected/nested location under vendor
+	//   - That the full repository history is available. In fact, the
+	//   assumption should be that if only this flag is on, the full repository
+	//   history is likely not available (locally)
+	//
+	// In short, the information encoded in this flag should not be construed as
+	// exhaustive.
+	existsInVendorRoot projectExistence = 1 << iota
+
+	// existsInCache indicates that a project exists on-disk in the local cache.
+	// It does not guarantee that an upstream exists, thus it cannot imply
+	// that the cache is at all correct - up-to-date, or even of the expected
+	// upstream project repository.
+	//
+	// Additionally, this refers only to the existence of the local repository
+	// itself; it says nothing about the existence or completeness of the
+	// separate metadata cache.
+	existsInCache
+
+	// existsUpstream indicates that a project repository was locatable at the
+	// path provided by a project's URI (a base import path).
+	existsUpstream
+)
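
These existence levels are bit flags meant to be combined and masked. A minimal package-internal sketch, with a hypothetical helper and invented values:

package gps

// checkUpstreamOnly is a hypothetical illustration of how projectExistence
// flags compose: several levels are OR'd into one value, then tested back out
// with a mask.
func checkUpstreamOnly() bool {
	var searched, found projectExistence
	searched |= existsInCache | existsUpstream // levels we looked for
	found |= existsUpstream                    // levels actually verified
	_ = searched
	return found&existsUpstream == existsUpstream
}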
diff --git a/vendor/github.com/sdboyer/gps/glide.lock b/vendor/github.com/sdboyer/gps/glide.lock
new file mode 100644
index 0000000..ea36f4b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/glide.lock
@@ -0,0 +1,19 @@
+hash: 2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e
+updated: 2016-06-06T22:10:37.696580463-04:00
+imports:
+- name: github.com/armon/go-radix
+  version: 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
+- name: github.com/hashicorp/go-immutable-radix
+  version: 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+- name: github.com/hashicorp/golang-lru
+  version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+- name: github.com/Masterminds/semver
+  version: 0a2c9fc0eee2c4cbb9526877c4a54da047fdcadd
+  vcs: git
+- name: github.com/Masterminds/vcs
+  version: 7a21de0acff824ccf45f633cc844a19625149c2f
+  vcs: git
+- name: github.com/termie/go-shutil
+  version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
+  vcs: git
+devImports: []
diff --git a/vendor/github.com/sdboyer/gps/glide.yaml b/vendor/github.com/sdboyer/gps/glide.yaml
new file mode 100644
index 0000000..690f9e1
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/glide.yaml
@@ -0,0 +1,14 @@
+package: github.com/sdboyer/gps
+owners:
+- name: Sam Boyer
+  email: tech@samboyer.org
+import:
+- package: github.com/Masterminds/semver
+  branch: 2.x
+  vcs: git
+- package: github.com/Masterminds/vcs
+  vcs: git
+- package: github.com/termie/go-shutil
+  version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
+  vcs: git
+- package: github.com/armon/go-radix
diff --git a/vendor/github.com/sdboyer/gps/hash.go b/vendor/github.com/sdboyer/gps/hash.go
new file mode 100644
index 0000000..9e27bcd
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/hash.go
@@ -0,0 +1,104 @@
+package gps
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"sort"
+)
+
+// HashInputs computes a hash digest of all data in a SolveParameters that act
+// as function inputs to Solve().
+//
+// The digest returned from this function is the same as the digest that would
+// be included with a Solve() Result. As such, it's appropriate for comparison
+// against the digest stored in a lock file, generated by a previous Solve(): if
+// the digests match, then manifest and lock are in sync, and a Solve() is
+// unnecessary.
+//
+// (Basically, this is for memoization.)
+func (s *solver) HashInputs() ([]byte, error) {
+	// Do these checks up front before any other work is needed, as they're the
+	// only things that can cause errors
+	// Pass in magic root values, and the bridge will analyze the right thing
+	ptree, err := s.b.listPackages(ProjectIdentifier{ProjectRoot: s.params.ImportRoot}, nil)
+	if err != nil {
+		return nil, badOptsFailure(fmt.Sprintf("Error while parsing packages under %s: %s", s.params.RootDir, err.Error()))
+	}
+
+	d, dd := s.params.Manifest.DependencyConstraints(), s.params.Manifest.TestDependencyConstraints()
+	p := make(sortedDeps, len(d))
+	copy(p, d)
+	p = append(p, dd...)
+
+	sort.Stable(p)
+
+	// We have everything we need; now, compute the hash.
+	h := sha256.New()
+	for _, pd := range p {
+		h.Write([]byte(pd.Ident.ProjectRoot))
+		h.Write([]byte(pd.Ident.NetworkName))
+		// FIXME Constraint.String() is not an injective transformation - tags
+		// and branches with the same name are written out as the same string.
+		// This could, albeit rarely, result in input collisions when a real
+		// change has occurred.
+		h.Write([]byte(pd.Constraint.String()))
+	}
+
+	// The stdlib and old appengine packages play the same functional role in
+	// solving as ignores. Because they change, albeit quite infrequently, we
+	// have to include them in the hash.
+	h.Write([]byte(stdlibPkgs))
+	h.Write([]byte(appenginePkgs))
+
+	// Write each of the packages, or the errors that were found for a
+	// particular subpath, into the hash.
+	for _, perr := range ptree.Packages {
+		if perr.Err != nil {
+			h.Write([]byte(perr.Err.Error()))
+		} else {
+			h.Write([]byte(perr.P.Name))
+			h.Write([]byte(perr.P.CommentPath))
+			h.Write([]byte(perr.P.ImportPath))
+			for _, imp := range perr.P.Imports {
+				h.Write([]byte(imp))
+			}
+			for _, imp := range perr.P.TestImports {
+				h.Write([]byte(imp))
+			}
+		}
+	}
+
+	// Add the package ignores, if any.
+	if len(s.ig) > 0 {
+		// Dump and sort the ignores
+		ig := make([]string, len(s.ig))
+		k := 0
+		for pkg := range s.ig {
+			ig[k] = pkg
+			k++
+		}
+		sort.Strings(ig)
+
+		for _, igp := range ig {
+			h.Write([]byte(igp))
+		}
+	}
+
+	// TODO(sdboyer) overrides
+	// TODO(sdboyer) aliases
+	return h.Sum(nil), nil
+}
+
+type sortedDeps []ProjectConstraint
+
+func (s sortedDeps) Len() int {
+	return len(s)
+}
+
+func (s sortedDeps) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s sortedDeps) Less(i, j int) bool {
+	return s[i].Ident.less(s[j].Ident)
+}
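
The memoization HashInputs enables looks roughly like the following hedged sketch, assuming (as the test that follows does) that Prepare returns a value exposing HashInputs and Solve, and that gps.Solver and gps.Solution are the exported names for them:

package main

import (
	"bytes"

	"github.com/sdboyer/gps"
)

// solveIfStale runs a solve only when the manifest inputs have drifted from
// the digest recorded in the lock.
func solveIfStale(s gps.Solver, l gps.Lock) (gps.Solution, error) {
	dig, err := s.HashInputs()
	if err != nil {
		return nil, err
	}
	if bytes.Equal(dig, l.InputHash()) {
		// Manifest and lock are in sync; the existing lock is still valid.
		return nil, nil
	}
	return s.Solve()
}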
diff --git a/vendor/github.com/sdboyer/gps/hash_test.go b/vendor/github.com/sdboyer/gps/hash_test.go
new file mode 100644
index 0000000..dc27ddf
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/hash_test.go
@@ -0,0 +1,35 @@
+package gps
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"testing"
+)
+
+func TestHashInputs(t *testing.T) {
+	fix := basicFixtures["shared dependency with overlapping constraints"]
+
+	params := SolveParameters{
+		RootDir:    string(fix.ds[0].n),
+		ImportRoot: fix.ds[0].n,
+		Manifest:   fix.ds[0],
+		Ignore:     []string{"foo", "bar"},
+	}
+
+	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
+	if err != nil {
+		t.Fatalf("Prepare returned unexpected err: %s", err)
+	}
+
+	dig, err := s.HashInputs()
+	if err != nil {
+		t.Fatalf("HashInputs returned unexpected err: %s", err)
+	}
+
+	h := sha256.New()
+	for _, v := range []string{"a", "a", "1.0.0", "b", "b", "1.0.0", stdlibPkgs, appenginePkgs, "root", "", "root", "a", "b", "bar", "foo"} {
+		h.Write([]byte(v))
+	}
+	correct := h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/import_mode_go15.go b/vendor/github.com/sdboyer/gps/import_mode_go15.go
new file mode 100644
index 0000000..5ef11c2
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/import_mode_go15.go
@@ -0,0 +1,13 @@
+// +build !go1.6
+
+package gps
+
+import "go/build"
+
+// analysisImportMode returns the import mode used for build.Import() calls for
+// standard package analysis.
+//
+// build.NoVendor was added in go1.6, so we have to omit it here.
+func analysisImportMode() build.ImportMode {
+	return build.ImportComment
+}
diff --git a/vendor/github.com/sdboyer/gps/import_mode_go16.go b/vendor/github.com/sdboyer/gps/import_mode_go16.go
new file mode 100644
index 0000000..edb534a
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/import_mode_go16.go
@@ -0,0 +1,11 @@
+// +build go1.6
+
+package gps
+
+import "go/build"
+
+// analysisImportMode returns the import mode used for build.Import() calls for
+// standard package analysis.
+func analysisImportMode() build.ImportMode {
+	return build.ImportComment | build.IgnoreVendor
+}
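
Both variants feed the same call site; a hedged sketch of that use, with a hypothetical helper around the standard build.Context.Import:

package gps

import "go/build"

// importForAnalysis shows analysisImportMode in use: ImportComment makes
// build.Import honor canonical import comments, and IgnoreVendor (go1.6+)
// skips vendor resolution so analysis sees the raw import paths.
func importForAnalysis(path, fromDir string) (*build.Package, error) {
	return build.Default.Import(path, fromDir, analysisImportMode())
}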
diff --git a/vendor/github.com/sdboyer/gps/lock.go b/vendor/github.com/sdboyer/gps/lock.go
new file mode 100644
index 0000000..1d4db56
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/lock.go
@@ -0,0 +1,161 @@
+package gps
+
+// Lock represents data from a lock file (or however the implementing tool
+// chooses to store it) at a particular version that is relevant to the
+// satisfiability solving process.
+//
+// In general, the information produced by gps on finding a successful
+// solution is all that would be necessary to constitute a lock file, though
+// tools can include whatever other information they want in their storage.
+type Lock interface {
+	// Indicates the version of the solver used to generate this lock data
+	//SolverVersion() string
+
+	// The hash of inputs to gps that resulted in this lock data
+	InputHash() []byte
+
+	// Projects returns the list of LockedProjects contained in the lock data.
+	Projects() []LockedProject
+}
+
+// LockedProject is a single project entry from a lock file. It expresses the
+// project's name, one or both of version and underlying revision, the network
+// URI for accessing it, the path at which it should be placed within a vendor
+// directory, and the packages that are used in it.
+type LockedProject struct {
+	pi   ProjectIdentifier
+	v    UnpairedVersion
+	r    Revision
+	pkgs []string
+}
+
+// SimpleLock is a helper for tools to easily describe lock data when they know
+// that no hash, or other complex information, is available.
+type SimpleLock []LockedProject
+
+var _ Lock = SimpleLock{}
+
+// InputHash always returns nil for SimpleLock. This makes it useless
+// as a stable lock to be written to disk, but still useful for some ephemeral
+// purposes.
+func (SimpleLock) InputHash() []byte {
+	return nil
+}
+
+// Projects returns the entire contents of the SimpleLock.
+func (l SimpleLock) Projects() []LockedProject {
+	return l
+}
+
+// NewLockedProject creates a new LockedProject struct with a given name,
+// version, and upstream repository URL.
+//
+// Note that passing a nil version will cause a panic. This is a correctness
+// measure to ensure that the solver is never exposed to a version-less lock
+// entry. Such a case would be meaningless - the solver would have no choice but
+// to simply dismiss that project. By creating a hard failure case via panic
+// instead, we are trying to avoid inflicting the resulting pain on the user by
+// instead forcing a decision on the Analyzer implementation.
+func NewLockedProject(n ProjectRoot, v Version, url string, pkgs []string) LockedProject {
+	if v == nil {
+		panic("must provide a non-nil version to create a LockedProject")
+	}
+
+	lp := LockedProject{
+		pi: ProjectIdentifier{
+			ProjectRoot: n,
+			NetworkName: url,
+		},
+		pkgs: pkgs,
+	}
+
+	switch tv := v.(type) {
+	case Revision:
+		lp.r = tv
+	case branchVersion:
+		lp.v = tv
+	case semVersion:
+		lp.v = tv
+	case plainVersion:
+		lp.v = tv
+	case versionPair:
+		lp.r = tv.r
+		lp.v = tv.v
+	}
+
+	return lp
+}
+
+// Ident returns the identifier describing the project. This includes both the
+// local name (the root name by which the project is referenced in import paths)
+// and the network name, where the upstream source lives.
+func (lp LockedProject) Ident() ProjectIdentifier {
+	return lp.pi
+}
+
+// Version assembles together whatever version and/or revision data is
+// available into a single Version.
+func (lp LockedProject) Version() Version {
+	if lp.r == "" {
+		return lp.v
+	}
+
+	if lp.v == nil {
+		return lp.r
+	}
+
+	return lp.v.Is(lp.r)
+}
+
+func (lp LockedProject) toAtom() atom {
+	pa := atom{
+		id: lp.Ident(),
+	}
+
+	if lp.v == nil {
+		pa.v = lp.r
+	} else if lp.r != "" {
+		pa.v = lp.v.Is(lp.r)
+	} else {
+		pa.v = lp.v
+	}
+
+	return pa
+}
+
+type safeLock struct {
+	h []byte
+	p []LockedProject
+}
+
+func (sl safeLock) InputHash() []byte {
+	return sl.h
+}
+
+func (sl safeLock) Projects() []LockedProject {
+	return sl.p
+}
+
+// prepLock ensures a lock is prepared and safe for use by the solver.
+// This entails two things:
+//
+//  * Ensuring that all LockedProject's identifiers are normalized.
+//  * Defensively ensuring that no outside routine can modify the lock while the
+//  solver is in-flight.
+//
+// This is achieved by copying the lock's data into a new safeLock.
+func prepLock(l Lock) Lock {
+	pl := l.Projects()
+
+	rl := safeLock{
+		h: l.InputHash(),
+		p: make([]LockedProject, len(pl)),
+	}
+
+	for k, lp := range pl {
+		lp.pi = lp.pi.normalize()
+		rl.p[k] = lp
+	}
+
+	return rl
+}
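
A hedged sketch of a tool handing pin data to the solver through this API; the project root, URL, revision, and package list are invented values:

package main

import "github.com/sdboyer/gps"

// exampleLock builds a single-project lock. The version is paired with its
// underlying revision, as lock data normally records both.
func exampleLock() gps.Lock {
	v := gps.NewVersion("1.0.0").Is(gps.Revision("aaaabbbbccccddddeeeeffff0000111122223333"))
	return gps.SimpleLock{
		gps.NewLockedProject(
			gps.ProjectRoot("github.com/example/project"), // hypothetical
			v,
			"https://github.com/example/project",
			[]string{"."},
		),
	}
}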
diff --git a/vendor/github.com/sdboyer/gps/manager_test.go b/vendor/github.com/sdboyer/gps/manager_test.go
new file mode 100644
index 0000000..ebc8091
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/manager_test.go
@@ -0,0 +1,342 @@
+package gps
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"runtime"
+	"sort"
+	"testing"
+
+	"github.com/Masterminds/semver"
+)
+
+var bd string
+
+// An analyzer that passes nothing back, but doesn't error. This is the naive
+// case - no constraints, no lock, and no errors. The SourceMgr will interpret
+// this as open/Any constraints on everything in the import graph.
+type naiveAnalyzer struct{}
+
+func (naiveAnalyzer) GetInfo(string, ProjectRoot) (Manifest, Lock, error) {
+	return nil, nil, nil
+}
+
+func sv(s string) *semver.Version {
+	sv, err := semver.NewVersion(s)
+	if err != nil {
+		panic(fmt.Sprintf("Error creating semver from %q: %s", s, err))
+	}
+
+	return sv
+}
+
+func init() {
+	_, filename, _, _ := runtime.Caller(1)
+	bd = path.Dir(filename)
+}
+
+func TestSourceManagerInit(t *testing.T) {
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	_, err = NewSourceManager(naiveAnalyzer{}, cpath, false)
+
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+	}
+	defer func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}()
+
+	_, err = NewSourceManager(naiveAnalyzer{}, cpath, false)
+	if err == nil {
+		t.Errorf("Creating second SourceManager should have failed due to file lock contention")
+	}
+
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, true)
+	if err != nil {
+		t.Errorf("Creating second SourceManager should have succeeded when force flag was passed, but failed with err %s", err)
+	}
+	defer sm.Release()
+
+	if _, err = os.Stat(path.Join(cpath, "sm.lock")); err != nil {
+		t.Errorf("Global cache lock file not created correctly")
+	}
+}
+
+func TestProjectManagerInit(t *testing.T) {
+	// This test is a bit slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping project manager init test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
+
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+		t.FailNow()
+	}
+	defer func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}()
+	defer sm.Release()
+
+	pn := ProjectRoot("github.com/Masterminds/VCSTestRepo")
+	v, err := sm.ListVersions(pn)
+	if err != nil {
+		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
+	}
+
+	if len(v) != 3 {
+		t.Errorf("Expected three version results from the test repo, got %v", len(v))
+	} else {
+		rev := Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
+		expected := []Version{
+			NewVersion("1.0.0").Is(rev),
+			NewBranch("master").Is(rev),
+			NewBranch("test").Is(rev),
+		}
+
+		// SourceManager itself doesn't guarantee ordering; sort them here so we
+		// can dependably check output
+		sort.Sort(upgradeVersionSorter(v))
+
+		for k, e := range expected {
+			if v[k] != e {
+				t.Errorf("Expected version %s in position %v but got %s", e, k, v[k])
+			}
+		}
+	}
+
+	// Two birds, one stone - make sure the internal ProjectManager vlist cache
+	// works by asking for the versions again, and do it through smcache to
+	// ensure its sorting works, as well.
+	smc := &bridge{
+		sm:     sm,
+		vlists: make(map[ProjectRoot][]Version),
+		s:      &solver{},
+	}
+
+	v, err = smc.listVersions(ProjectIdentifier{ProjectRoot: pn})
+	if err != nil {
+		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
+	}
+
+	if len(v) != 3 {
+		t.Errorf("Expected three version results from the test repo, got %v", len(v))
+	} else {
+		rev := Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
+		expected := []Version{
+			NewVersion("1.0.0").Is(rev),
+			NewBranch("master").Is(rev),
+			NewBranch("test").Is(rev),
+		}
+
+		for k, e := range expected {
+			if v[k] != e {
+				t.Errorf("Expected version %s in position %v but got %s", e, k, v[k])
+			}
+		}
+	}
+
+	// Ensure that the appropriate cache dirs and files exist
+	_, err = os.Stat(path.Join(cpath, "src", "github.com", "Masterminds", "VCSTestRepo", ".git"))
+	if err != nil {
+		t.Error("Cache repo does not exist in expected location")
+	}
+
+	_, err = os.Stat(path.Join(cpath, "metadata", "github.com", "Masterminds", "VCSTestRepo", "cache.json"))
+	if err != nil {
+		// TODO(sdboyer) temporarily disabled until we turn caching back on
+		//t.Error("Metadata cache json file does not exist in expected location")
+	}
+
+	// Ensure project existence values are what we expect
+	var exists bool
+	exists, err = sm.RepoExists(pn)
+	if err != nil {
+		t.Errorf("Error on checking RepoExists: %s", err)
+	}
+	if !exists {
+		t.Error("Repo should exist after non-erroring call to ListVersions")
+	}
+
+	// Now reach inside the black box
+	pms, err := sm.getProjectManager(pn)
+	if err != nil {
+		t.Errorf("Error on grabbing project manager obj: %s", err)
+	}
+
+	// Check upstream existence flag
+	if !pms.pm.CheckExistence(existsUpstream) {
+		t.Errorf("ExistsUpstream flag not being correctly set on the project")
+	}
+}
+
+func TestRepoVersionFetching(t *testing.T) {
+	// This test is quite slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping repo version fetching test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+		t.FailNow()
+	}
+
+	upstreams := []ProjectRoot{
+		"github.com/Masterminds/VCSTestRepo",
+		"bitbucket.org/mattfarina/testhgrepo",
+		"launchpad.net/govcstestbzrrepo",
+	}
+
+	pms := make([]*projectManager, len(upstreams))
+	for k, u := range upstreams {
+		pmi, err := sm.getProjectManager(u)
+		if err != nil {
+			sm.Release()
+			removeAll(cpath)
+			t.Errorf("Unexpected error on ProjectManager creation: %s", err)
+			t.FailNow()
+		}
+		pms[k] = pmi.pm
+	}
+
+	defer func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}()
+	defer sm.Release()
+
+	// test git first
+	vlist, exbits, err := pms[0].crepo.getCurrentVersionPairs()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from git repo: %s", err)
+	}
+	if exbits != existsUpstream {
+		t.Errorf("git pair fetch should only set upstream existence bits, but got %v", exbits)
+	}
+	if len(vlist) != 3 {
+		t.Errorf("git test repo should've produced three versions, got %v", len(vlist))
+	} else {
+		v := NewBranch("master").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
+		if vlist[0] != v {
+			t.Errorf("git pair fetch reported incorrect first version, got %s", vlist[0])
+		}
+
+		v = NewBranch("test").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
+		if vlist[1] != v {
+			t.Errorf("git pair fetch reported incorrect second version, got %s", vlist[1])
+		}
+
+		v = NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
+		if vlist[2] != v {
+			t.Errorf("git pair fetch reported incorrect third version, got %s", vlist[2])
+		}
+	}
+
+	// now hg
+	vlist, exbits, err = pms[1].crepo.getCurrentVersionPairs()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from hg repo: %s", err)
+	}
+	if exbits != existsUpstream|existsInCache {
+		t.Errorf("hg pair fetch should set upstream and cache existence bits, but got %v", exbits)
+	}
+	if len(vlist) != 2 {
+		t.Errorf("hg test repo should've produced two versions, got %v", len(vlist))
+	} else {
+		v := NewVersion("1.0.0").Is(Revision("d680e82228d206935ab2eaa88612587abe68db07"))
+		if vlist[0] != v {
+			t.Errorf("hg pair fetch reported incorrect first version, got %s", vlist[0])
+		}
+
+		v = NewBranch("test").Is(Revision("6c44ee3fe5d87763616c19bf7dbcadb24ff5a5ce"))
+		if vlist[1] != v {
+			t.Errorf("hg pair fetch reported incorrect second version, got %s", vlist[1])
+		}
+	}
+
+	// bzr last
+	vlist, exbits, err = pms[2].crepo.getCurrentVersionPairs()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err)
+	}
+	if exbits != existsUpstream|existsInCache {
+		t.Errorf("bzr pair fetch should set upstream and cache existence bits, but got %v", exbits)
+	}
+	if len(vlist) != 1 {
+		t.Errorf("bzr test repo should've produced one version, got %v", len(vlist))
+	} else {
+		v := NewVersion("1.0.0").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
+		if vlist[0] != v {
+			t.Errorf("bzr pair fetch reported incorrect first version, got %s", vlist[0])
+		}
+	}
+	// no svn for now, because...svn
+}
+
+// Regression test for #32
+func TestGetInfoListVersionsOrdering(t *testing.T) {
+	// This test is quite slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping slow test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath, false)
+
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+		t.FailNow()
+	}
+	defer func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}()
+	defer sm.Release()
+
+	// setup done, now do the test
+
+	pn := ProjectRoot("github.com/Masterminds/VCSTestRepo")
+
+	_, _, err = sm.GetProjectInfo(pn, NewVersion("1.0.0"))
+	if err != nil {
+		t.Errorf("Unexpected error from GetInfoAt %s", err)
+	}
+
+	v, err := sm.ListVersions(pn)
+	if err != nil {
+		t.Errorf("Unexpected error from ListVersions %s", err)
+	}
+
+	if len(v) != 3 {
+		t.Errorf("Expected three results from ListVersions, got %v", len(v))
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/manifest.go b/vendor/github.com/sdboyer/gps/manifest.go
new file mode 100644
index 0000000..83fd9d7
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/manifest.go
@@ -0,0 +1,77 @@
+package gps
+
+// Manifest represents manifest-type data for a project at a particular version.
+// That means dependency constraints, both for normal dependencies and for
+// tests. The constraints expressed in a manifest determine the set of versions that
+// are acceptable to try for a given project.
+//
+// Expressing a constraint in a manifest does not guarantee that a particular
+// dependency will be present. It only guarantees that if packages in the
+// project specified by the dependency are discovered through static analysis of
+// the (transitive) import graph, then they will conform to the constraint.
+//
+// This does entail that manifests can express constraints on projects they do
+// not themselves import. This is by design, but its implications are complex.
+// See the gps docs for more information: https://github.com/sdboyer/gps/wiki
+type Manifest interface {
+	// Returns a list of project-level constraints.
+	DependencyConstraints() []ProjectConstraint
+	// Returns a list of constraints applicable to test imports. Note that this
+	// will only be consulted for root manifests.
+	TestDependencyConstraints() []ProjectConstraint
+}
+
+// SimpleManifest is a helper for tools to enumerate manifest data. It's
+// generally intended for ephemeral manifests, such as those Analyzers create on
+// the fly for projects with no manifest metadata, or whose metadata is
+// expressed only in a foreign tool's idioms.
+type SimpleManifest struct {
+	Deps     []ProjectConstraint
+	TestDeps []ProjectConstraint
+}
+
+var _ Manifest = SimpleManifest{}
+
+// DependencyConstraints returns the project's dependencies.
+func (m SimpleManifest) DependencyConstraints() []ProjectConstraint {
+	return m.Deps
+}
+
+// TestDependencyConstraints returns the project's test dependencies.
+func (m SimpleManifest) TestDependencyConstraints() []ProjectConstraint {
+	return m.TestDeps
+}
+
+// prepManifest ensures a manifest is prepared and safe for use by the solver.
+// This entails two things:
+//
+//  * Ensuring that all ProjectIdentifiers are normalized (otherwise matching
+//  can get screwy and the queues go out of alignment)
+//  * Defensively ensuring that no outside routine can modify the manifest while
+//  the solver is in-flight.
+//
+// This is achieved by copying the manifest's data into a new SimpleManifest.
+func prepManifest(m Manifest) Manifest {
+	if m == nil {
+		return SimpleManifest{}
+	}
+
+	deps := m.DependencyConstraints()
+	ddeps := m.TestDependencyConstraints()
+
+	rm := SimpleManifest{
+		Deps:     make([]ProjectConstraint, len(deps)),
+		TestDeps: make([]ProjectConstraint, len(ddeps)),
+	}
+
+	for k, d := range deps {
+		d.Ident = d.Ident.normalize()
+		rm.Deps[k] = d
+	}
+	for k, d := range ddeps {
+		d.Ident = d.Ident.normalize()
+		rm.TestDeps[k] = d
+	}
+
+	return rm
+}
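
A companion sketch for the manifest side, as an Analyzer might produce on the fly; the dependency and constraint are invented, and a Version is used directly as a Constraint, which gps's version types are assumed to satisfy:

package main

import "github.com/sdboyer/gps"

// exampleManifest translates foreign metadata into gps constraint form.
func exampleManifest() gps.Manifest {
	return gps.SimpleManifest{
		Deps: []gps.ProjectConstraint{
			{
				Ident: gps.ProjectIdentifier{
					ProjectRoot: gps.ProjectRoot("github.com/example/dep"), // hypothetical
				},
				Constraint: gps.NewVersion("1.0.0"),
			},
		},
	}
}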
diff --git a/vendor/github.com/sdboyer/gps/marker-header.png b/vendor/github.com/sdboyer/gps/marker-header.png
new file mode 100644
index 0000000..66965c5
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/marker-header.png
Binary files differ
diff --git a/vendor/github.com/sdboyer/gps/project_manager.go b/vendor/github.com/sdboyer/gps/project_manager.go
new file mode 100644
index 0000000..e174fde
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/project_manager.go
@@ -0,0 +1,584 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"go/build"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/Masterminds/vcs"
+	"github.com/termie/go-shutil"
+)
+
+type projectManager struct {
+	// The identifier of the project. At this level, corresponds to the
+	// '$GOPATH/src'-relative path, *and* the network name.
+	n ProjectRoot
+
+	// build.Context to use in any analysis, and to pass to the analyzer
+	ctx build.Context
+
+	// Object for the cache repository
+	crepo *repo
+
+	// Indicates the extent to which we have searched for, and verified, the
+	// existence of the project/repo.
+	ex existence
+
+	// Analyzer, injected by way of the SourceManager and originally from the
+	// sm's creator
+	an ProjectAnalyzer
+
+	// Whether the cache has the latest info on versions
+	cvsync bool
+
+	// The project metadata cache. This is persisted to disk, for reuse across
+	// solver runs.
+	// TODO(sdboyer) protect with mutex
+	dc *projectDataCache
+}
+
+type existence struct {
+	// The existence levels for which a search/check has been performed
+	s projectExistence
+
+	// The existence levels verified to be present through searching
+	f projectExistence
+}
+
+// TODO(sdboyer) figure out shape of versions, then implement marshaling/unmarshaling
+type projectDataCache struct {
+	Version  string                   `json:"version"` // TODO(sdboyer) use this
+	Infos    map[Revision]projectInfo `json:"infos"`
+	Packages map[Revision]PackageTree `json:"packages"`
+	VMap     map[Version]Revision     `json:"vmap"`
+	RMap     map[Revision][]Version   `json:"rmap"`
+}
+
+// projectInfo holds manifest and lock
+type projectInfo struct {
+	Manifest
+	Lock
+}
+
+type repo struct {
+	// Path to the root of the default working copy (NOT the repo itself)
+	rpath string
+
+	// Mutex controlling general access to the repo
+	mut sync.RWMutex
+
+	// Object for direct repo interaction
+	r vcs.Repo
+
+	// Whether or not the cache repo is in sync (think dvcs) with upstream
+	synced bool
+}
+
+func (pm *projectManager) GetInfoAt(v Version) (Manifest, Lock, error) {
+	if err := pm.ensureCacheExistence(); err != nil {
+		return nil, nil, err
+	}
+
+	if r, exists := pm.dc.VMap[v]; exists {
+		if pi, exists := pm.dc.Infos[r]; exists {
+			return pi.Manifest, pi.Lock, nil
+		}
+	}
+
+	pm.crepo.mut.Lock()
+	var err error
+	if !pm.crepo.synced {
+		err = pm.crepo.r.Update()
+		if err != nil {
+			return nil, nil, fmt.Errorf("Could not fetch latest updates into repository: %s", err)
+		}
+		pm.crepo.synced = true
+	}
+
+	// Always prefer a rev, if it's available
+	if pv, ok := v.(PairedVersion); ok {
+		err = pm.crepo.r.UpdateVersion(pv.Underlying().String())
+	} else {
+		err = pm.crepo.r.UpdateVersion(v.String())
+	}
+	pm.crepo.mut.Unlock()
+	if err != nil {
+		// TODO(sdboyer) More-er proper-er error
+		panic(fmt.Sprintf("canary - why is checkout/whatever failing: %s %s %s", pm.n, v.String(), err))
+	}
+
+	pm.crepo.mut.RLock()
+	m, l, err := pm.an.GetInfo(filepath.Join(pm.ctx.GOPATH, "src", string(pm.n)), pm.n)
+	// TODO(sdboyer) cache results
+	pm.crepo.mut.RUnlock()
+
+	if err == nil {
+		if l != nil {
+			l = prepLock(l)
+		}
+
+		// If m is nil, prepManifest will provide an empty one.
+		pi := projectInfo{
+			Manifest: prepManifest(m),
+			Lock:     l,
+		}
+
+		// TODO(sdboyer) this just clobbers all over and ignores the paired/unpaired
+		// distinction; serious fix is needed
+		if r, exists := pm.dc.VMap[v]; exists {
+			pm.dc.Infos[r] = pi
+		}
+
+		return pi.Manifest, pi.Lock, nil
+	}
+
+	return nil, nil, err
+}
+
+func (pm *projectManager) ListPackages(v Version) (ptree PackageTree, err error) {
+	if err = pm.ensureCacheExistence(); err != nil {
+		return
+	}
+
+	// See if we can find it in the cache
+	var r Revision
+	switch v.(type) {
+	case Revision, PairedVersion:
+		var ok bool
+		if r, ok = v.(Revision); !ok {
+			r = v.(PairedVersion).Underlying()
+		}
+
+		if ptree, cached := pm.dc.Packages[r]; cached {
+			return ptree, nil
+		}
+	default:
+		var has bool
+		if r, has = pm.dc.VMap[v]; has {
+			if ptree, cached := pm.dc.Packages[r]; cached {
+				return ptree, nil
+			}
+		}
+	}
+
+	// TODO(sdboyer) handle the case where we have a version w/out rev, and not in cache
+
+	// Not in the cache; check out the version and do the analysis
+	pm.crepo.mut.Lock()
+	// Check out the desired version for analysis
+	if r != "" {
+		// Always prefer a rev, if it's available
+		err = pm.crepo.r.UpdateVersion(string(r))
+	} else {
+		// If we don't have a rev, ensure the repo is up to date, otherwise we
+		// could have a desync issue
+		if !pm.crepo.synced {
+			err = pm.crepo.r.Update()
+			if err != nil {
+				return PackageTree{}, fmt.Errorf("Could not fetch latest updates into repository: %s", err)
+			}
+			pm.crepo.synced = true
+		}
+		err = pm.crepo.r.UpdateVersion(v.String())
+	}
+
+	ptree, err = listPackages(filepath.Join(pm.ctx.GOPATH, "src", string(pm.n)), string(pm.n))
+	pm.crepo.mut.Unlock()
+
+	// TODO(sdboyer) cache errs?
+	if err == nil {
+		pm.dc.Packages[r] = ptree
+	}
+
+	return
+}
+
+func (pm *projectManager) ensureCacheExistence() error {
+	// Technically, methods could attempt to return straight from the
+	// metadata cache even if the repo cache doesn't exist on disk. But that
+	// would allow weird state inconsistencies (cache exists, but no repo...how
+	// does that even happen?) that it'd be better to just not allow so that we
+	// don't have to think about it elsewhere
+	if !pm.CheckExistence(existsInCache) {
+		if pm.CheckExistence(existsUpstream) {
+			pm.crepo.mut.Lock()
+			err := pm.crepo.r.Get()
+			pm.crepo.mut.Unlock()
+
+			if err != nil {
+				return fmt.Errorf("failed to create repository cache for %s", pm.n)
+			}
+			pm.ex.s |= existsInCache
+			pm.ex.f |= existsInCache
+		} else {
+			return fmt.Errorf("project %s does not exist upstream", pm.n)
+		}
+	}
+
+	return nil
+}
+
+func (pm *projectManager) ListVersions() (vlist []Version, err error) {
+	if !pm.cvsync {
+		// This check only guarantees that the upstream exists, not the cache
+		pm.ex.s |= existsUpstream
+		vpairs, exbits, err := pm.crepo.getCurrentVersionPairs()
+		// But it *may* also check the local existence
+		pm.ex.s |= exbits
+		pm.ex.f |= exbits
+
+		if err != nil {
+			// TODO(sdboyer) More-er proper-er error
+			fmt.Println(err)
+			return nil, err
+		}
+
+		vlist = make([]Version, len(vpairs))
+		// mark our cache as synced if we got existsUpstream back
+		if exbits&existsUpstream == existsUpstream {
+			pm.cvsync = true
+		}
+
+		// Process the version data into the cache
+		// TODO(sdboyer) detect out-of-sync data as we do this?
+		for k, v := range vpairs {
+			pm.dc.VMap[v] = v.Underlying()
+			pm.dc.RMap[v.Underlying()] = append(pm.dc.RMap[v.Underlying()], v)
+			vlist[k] = v
+		}
+	} else {
+		vlist = make([]Version, len(pm.dc.VMap))
+		k := 0
+		// TODO(sdboyer) key type of VMap should be string; recombine here
+		//for v, r := range pm.dc.VMap {
+		for v := range pm.dc.VMap {
+			vlist[k] = v
+			k++
+		}
+	}
+
+	return
+}
+
+func (pm *projectManager) RevisionPresentIn(r Revision) (bool, error) {
+	// First and fastest path is to check the data cache to see if the rev is
+	// present. This could give us false positives, but the cases where that can
+	// occur would require a type of cache staleness that seems *exceedingly*
+	// unlikely to occur.
+	if _, has := pm.dc.Infos[r]; has {
+		return true, nil
+	} else if _, has := pm.dc.RMap[r]; has {
+		return true, nil
+	}
+
+	// For now at least, just run GetInfoAt(); it basically accomplishes the
+	// same thing.
+	if _, _, err := pm.GetInfoAt(r); err != nil {
+		return false, err
+	}
+	return true, nil
+}
+
+// CheckExistence provides a direct method for querying existence levels of the
+// project. It will only perform actual searching (local fs or over the network)
+// if no previous attempt at that search has been made.
+//
+// Note that this may perform read-ish operations on the cache repo, and it
+// takes a lock accordingly. Deadlock may result from calling it during a
+// segment where the cache repo mutex is already write-locked.
+func (pm *projectManager) CheckExistence(ex projectExistence) bool {
+	if pm.ex.s&ex != ex {
+		if ex&existsInVendorRoot != 0 && pm.ex.s&existsInVendorRoot == 0 {
+			panic("should now be implemented in bridge")
+		}
+		if ex&existsInCache != 0 && pm.ex.s&existsInCache == 0 {
+			pm.crepo.mut.RLock()
+			pm.ex.s |= existsInCache
+			if pm.crepo.r.CheckLocal() {
+				pm.ex.f |= existsInCache
+			}
+			pm.crepo.mut.RUnlock()
+		}
+		if ex&existsUpstream != 0 && pm.ex.s&existsUpstream == 0 {
+			pm.crepo.mut.RLock()
+			pm.ex.s |= existsUpstream
+			if pm.crepo.r.Ping() {
+				pm.ex.f |= existsUpstream
+			}
+			pm.crepo.mut.RUnlock()
+		}
+	}
+
+	return ex&pm.ex.f == ex
+}
+
+func (pm *projectManager) ExportVersionTo(v Version, to string) error {
+	return pm.crepo.exportVersionTo(v, to)
+}
+
+func (r *repo) getCurrentVersionPairs() (vlist []PairedVersion, exbits projectExistence, err error) {
+	r.mut.Lock()
+	defer r.mut.Unlock()
+
+	switch r.r.(type) {
+	case *vcs.GitRepo:
+		var out []byte
+		c := exec.Command("git", "ls-remote", r.r.Remote())
+		// Ensure no terminal prompting for PWs
+		c.Env = mergeEnvLists([]string{"GIT_TERMINAL_PROMPT=0"}, os.Environ())
+		out, err = c.CombinedOutput()
+
+		all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+		if err != nil || len(all) == 0 {
+			// TODO(sdboyer) remove this path? it really just complicates things, for
+			// probably not much benefit
+
+			// ls-remote failed, probably due to bad communication or a faulty
+			// upstream implementation. So fetch updates, then build the list
+			// locally
+			err = r.r.Update()
+			if err != nil {
+				// Definitely have a problem, now - bail out
+				return
+			}
+
+			// Upstream and cache must exist, so add that to exbits
+			exbits |= existsUpstream | existsInCache
+			// Also, local is definitely now synced
+			r.synced = true
+
+			out, err = r.r.RunFromDir("git", "show-ref", "--dereference")
+			if err != nil {
+				return
+			}
+
+			all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+		}
+		// Local cache may not actually exist here, but upstream definitely does
+		exbits |= existsUpstream
+
+		tmap := make(map[string]PairedVersion)
+		for _, pair := range all {
+			var v PairedVersion
+			if string(pair[46:51]) == "heads" {
+				v = NewBranch(string(pair[52:])).Is(Revision(pair[:40])).(PairedVersion)
+				vlist = append(vlist, v)
+			} else if string(pair[46:50]) == "tags" {
+				vstr := string(pair[51:])
+				if strings.HasSuffix(vstr, "^{}") {
+					// If the suffix is there, then we *know* this is the rev of
+					// the underlying commit object that we actually want
+					vstr = strings.TrimSuffix(vstr, "^{}")
+				} else if _, exists := tmap[vstr]; exists {
+					// Already saw the deref'd version of this tag, if one
+					// exists, so skip this.
+					continue
+					// Can only hit this branch if we somehow got the deref'd
+					// version first. Which should be impossible, but this
+					// covers us in case of weirdness, anyway.
+				}
+				v = NewVersion(vstr).Is(Revision(pair[:40])).(PairedVersion)
+				tmap[vstr] = v
+			}
+		}
+
+		// Append all the deref'd (if applicable) tags into the list
+		for _, v := range tmap {
+			vlist = append(vlist, v)
+		}
+	case *vcs.BzrRepo:
+		var out []byte
+		// Update the local first
+		err = r.r.Update()
+		if err != nil {
+			return
+		}
+		// Upstream and cache must exist, so add that to exbits
+		exbits |= existsUpstream | existsInCache
+		// Also, local is definitely now synced
+		r.synced = true
+
+		// Now, list all the tags
+		out, err = r.r.RunFromDir("bzr", "tags", "--show-ids", "-v")
+		if err != nil {
+			return
+		}
+
+		all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+		for _, line := range all {
+			idx := bytes.IndexByte(line, 32) // space
+			v := NewVersion(string(line[:idx])).Is(Revision(bytes.TrimSpace(line[idx:]))).(PairedVersion)
+			vlist = append(vlist, v)
+		}
+
+	case *vcs.HgRepo:
+		var out []byte
+		err = r.r.Update()
+		if err != nil {
+			return
+		}
+
+		// Upstream and cache must exist, so add that to exbits
+		exbits |= existsUpstream | existsInCache
+		// Also, local is definitely now synced
+		r.synced = true
+
+		out, err = r.r.RunFromDir("hg", "tags", "--debug", "--verbose")
+		if err != nil {
+			return
+		}
+
+		all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+		lbyt := []byte("local")
+		nulrev := []byte("0000000000000000000000000000000000000000")
+		for _, line := range all {
+			if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
+				// Skip local tags
+				continue
+			}
+
+			// tip is magic, don't include it
+			if bytes.HasPrefix(line, []byte("tip")) {
+				continue
+			}
+
+			// Split on colon; this gets us the rev and the tag plus local revno
+			pair := bytes.Split(line, []byte(":"))
+			if bytes.Equal(nulrev, pair[1]) {
+				// null rev indicates this tag is marked for deletion
+				continue
+			}
+
+			idx := bytes.IndexByte(pair[0], 32) // space
+			v := NewVersion(string(pair[0][:idx])).Is(Revision(pair[1])).(PairedVersion)
+			vlist = append(vlist, v)
+		}
+
+		out, err = r.r.RunFromDir("hg", "branches", "--debug", "--verbose")
+		if err != nil {
+			// better nothing than incomplete
+			vlist = nil
+			return
+		}
+
+		all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+		lbyt = []byte("(inactive)")
+		for _, line := range all {
+			if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
+				// Skip inactive branches
+				continue
+			}
+
+			// Split on colon; this gets us the rev and the branch plus local revno
+			pair := bytes.Split(line, []byte(":"))
+			idx := bytes.IndexByte(pair[0], 32) // space
+			v := NewBranch(string(pair[0][:idx])).Is(Revision(pair[1])).(PairedVersion)
+			vlist = append(vlist, v)
+		}
+	case *vcs.SvnRepo:
+		// TODO(sdboyer) is it ok to return empty vlist and no error?
+		// TODO(sdboyer) ...gotta do something for svn, right?
+	default:
+		panic("unknown repo type")
+	}
+
+	return
+}
+
+func (r *repo) exportVersionTo(v Version, to string) error {
+	r.mut.Lock()
+	defer r.mut.Unlock()
+
+	switch r.r.(type) {
+	case *vcs.GitRepo:
+		// Back up original index
+		idx, bak := path.Join(r.rpath, ".git", "index"), path.Join(r.rpath, ".git", "origindex")
+		err := os.Rename(idx, bak)
+		if err != nil {
+			return err
+		}
+
+		// TODO(sdboyer) could have an err here
+		defer os.Rename(bak, idx)
+
+		vstr := v.String()
+		if rv, ok := v.(PairedVersion); ok {
+			vstr = rv.Underlying().String()
+		}
+		_, err = r.r.RunFromDir("git", "read-tree", vstr)
+		if err != nil {
+			return err
+		}
+
+		// Ensure we have exactly one trailing slash
+		to = strings.TrimSuffix(to, string(os.PathSeparator)) + string(os.PathSeparator)
+		// Checkout from our temporary index to the desired target location on disk;
+		// now it's git's job to make it fast. Sadly, this approach *does* also
+		// write out vendor dirs. There doesn't appear to be a way to make
+		// checkout-index respect sparse checkout rules (-a supersedes it);
+		// the alternative is using plain checkout, though we have a bunch of
+		// housekeeping to do to set up, then tear down, the sparse checkout
+		// controls, as well as restore the original index and HEAD.
+		_, err = r.r.RunFromDir("git", "checkout-index", "-a", "--prefix="+to)
+		return err
+	default:
+		// TODO(sdboyer) This is a dumb, slow approach, but we're punting on making these
+		// fast for now because git is the OVERWHELMING case
+		r.r.UpdateVersion(v.String())
+
+		cfg := &shutil.CopyTreeOptions{
+			Symlinks:     true,
+			CopyFunction: shutil.Copy,
+			Ignore: func(src string, contents []os.FileInfo) (ignore []string) {
+				for _, fi := range contents {
+					if !fi.IsDir() {
+						continue
+					}
+					n := fi.Name()
+					switch n {
+					case "vendor", ".bzr", ".svn", ".hg":
+						ignore = append(ignore, n)
+					}
+				}
+
+				return
+			},
+		}
+
+		return shutil.CopyTree(r.rpath, to, cfg)
+	}
+}
+
+// This func is copied from Masterminds/vcs so we can exec our own commands
+func mergeEnvLists(in, out []string) []string {
+NextVar:
+	for _, inkv := range in {
+		k := strings.SplitAfterN(inkv, "=", 2)[0]
+		for i, outkv := range out {
+			if strings.HasPrefix(outkv, k) {
+				out[i] = inkv
+				continue NextVar
+			}
+		}
+		out = append(out, inkv)
+	}
+	return out
+}
+
+func stripVendor(path string, info os.FileInfo, err error) error {
+	if info.Name() == "vendor" {
+		if _, err := os.Lstat(path); err == nil {
+			if info.IsDir() {
+				return removeAll(path)
+			}
+		}
+	}
+
+	return nil
+}
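
mergeEnvLists's override semantics in one package-internal sketch (values invented): entries in the first list replace same-named variables in the second, and everything else passes through.

package gps

import "fmt"

// demoMergeEnvLists prints [PATH=/usr/bin GIT_TERMINAL_PROMPT=0]: the
// GIT_TERMINAL_PROMPT entry from the first list wins; PATH is untouched.
func demoMergeEnvLists() {
	env := mergeEnvLists(
		[]string{"GIT_TERMINAL_PROMPT=0"},
		[]string{"PATH=/usr/bin", "GIT_TERMINAL_PROMPT=1"},
	)
	fmt.Println(env)
}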
diff --git a/vendor/github.com/sdboyer/gps/remote.go b/vendor/github.com/sdboyer/gps/remote.go
new file mode 100644
index 0000000..c808d9a
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/remote.go
@@ -0,0 +1,306 @@
+package gps
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// A remoteRepo represents a potential remote repository resource.
+//
+// RemoteRepos are based purely on lexical analysis; successfully constructing
+// one is not a guarantee that the resource it identifies actually exists or is
+// accessible.
+type remoteRepo struct {
+	Base     string
+	RelPkg   string
+	CloneURL *url.URL
+	Schemes  []string
+	VCS      []string
+}
+
+//type remoteResult struct {
+//r   remoteRepo
+//err error
+//}
+
+// TODO(sdboyer) sync access to this map
+//var remoteCache = make(map[string]remoteResult)
+
+// Regexes for the different known import path flavors
+var (
+	// This regex allowed some usernames that github currently disallows. They
+	// may have allowed them in the past; keeping it in case we need to revert.
+	//ghRegex      = regexp.MustCompile(`^(?P<root>github\.com/([A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`)
+	ghRegex      = regexp.MustCompile(`^(?P<root>github\.com/([A-Za-z0-9][-A-Za-z0-9]*[A-Za-z0-9]/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	gpinNewRegex = regexp.MustCompile(`^(?P<root>gopkg\.in/(?:([a-zA-Z0-9][-a-zA-Z0-9]+)/)?([a-zA-Z][-.a-zA-Z0-9]*)\.((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(-unstable)?)(?:\.git)?)((?:/[a-zA-Z0-9][-.a-zA-Z0-9]*)*)$`)
+	//gpinOldRegex = regexp.MustCompile(`^(?P<root>gopkg\.in/(?:([a-z0-9][-a-z0-9]+)/)?((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(-unstable)?)/([a-zA-Z][-a-zA-Z0-9]*)(?:\.git)?)((?:/[a-zA-Z][-a-zA-Z0-9]*)*)$`)
+	bbRegex = regexp.MustCompile(`^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	//lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net/([A-Za-z0-9-._]+)(/[A-Za-z0-9-._]+)?)(/.+)?`)
+	lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net/([A-Za-z0-9-._]+))((?:/[A-Za-z0-9_.\-]+)*)?`)
+	//glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net/([A-Za-z0-9_.\-]+)|~[A-Za-z0-9_.\-]+/(\+git|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+)$`)
+	glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net/([A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	//gcRegex      = regexp.MustCompile(`^(?P<root>code\.google\.com/[pr]/(?P<project>[a-z0-9\-]+)(\.(?P<subrepo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`)
+	jazzRegex    = regexp.MustCompile(`^(?P<root>hub\.jazz\.net/(git/[a-z0-9]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	apacheRegex  = regexp.MustCompile(`^(?P<root>git\.apache\.org/([a-z0-9_.\-]+\.git))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	genericRegex = regexp.MustCompile(`^(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/~]*?)\.(?P<vcs>bzr|git|hg|svn))((?:/[A-Za-z0-9_.\-]+)*)$`)
+)
+
+// Other helper regexes
+var (
+	scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
+	pathvld     = regexp.MustCompile(`^([A-Za-z0-9-]+)(\.[A-Za-z0-9-]+)+(/[A-Za-z0-9-_.~]+)*$`)
+)
+
+// deduceRemoteRepo takes a potential import path and returns a RemoteRepo
+// representing the remote location of the source of an import path. Remote
+// repositories can be bare import paths, or URLs including a checkout scheme.
+func deduceRemoteRepo(path string) (rr *remoteRepo, err error) {
+	rr = &remoteRepo{}
+	if m := scpSyntaxRe.FindStringSubmatch(path); m != nil {
+		// Match SCP-like syntax and convert it to a URL.
+		// Eg, "git@github.com:user/repo" becomes
+		// "ssh://git@github.com/user/repo".
+		rr.CloneURL = &url.URL{
+			Scheme: "ssh",
+			User:   url.User(m[1]),
+			Host:   m[2],
+			Path:   "/" + m[3],
+			// TODO(sdboyer) This is what stdlib sets; grok why better
+			//RawPath: m[3],
+		}
+	} else {
+		rr.CloneURL, err = url.Parse(path)
+		if err != nil {
+			return nil, fmt.Errorf("%q is not a valid import path", path)
+		}
+	}
+
+	if rr.CloneURL.Host != "" {
+		path = rr.CloneURL.Host + "/" + strings.TrimPrefix(rr.CloneURL.Path, "/")
+	} else {
+		path = rr.CloneURL.Path
+	}
+
+	if !pathvld.MatchString(path) {
+		return nil, fmt.Errorf("%q is not a valid import path", path)
+	}
+
+	if rr.CloneURL.Scheme != "" {
+		rr.Schemes = []string{rr.CloneURL.Scheme}
+	}
+
+	// TODO(sdboyer) instead of a switch, encode base domain in radix tree and pick
+	// detector from there; if failure, then fall back on metadata work
+
+	switch {
+	case ghRegex.MatchString(path):
+		v := ghRegex.FindStringSubmatch(path)
+
+		rr.CloneURL.Host = "github.com"
+		rr.CloneURL.Path = v[2]
+		rr.Base = v[1]
+		rr.RelPkg = strings.TrimPrefix(v[3], "/")
+		rr.VCS = []string{"git"}
+
+		return
+
+	case gpinNewRegex.MatchString(path):
+		v := gpinNewRegex.FindStringSubmatch(path)
+		// Duplicate some logic from the gopkg.in server in order to validate
+		// the import path string without having to hit the server
+		if strings.Contains(v[4], ".") {
+			return nil, fmt.Errorf("%q is not a valid import path; gopkg.in only allows major versions (%q instead of %q)",
+				path, v[4][:strings.Index(v[4], ".")], v[4])
+		}
+
+		// gopkg.in is always backed by github
+		rr.CloneURL.Host = "github.com"
+		// If the third position is empty, it's the shortened form that expands
+		// to the go-pkg github user
+		if v[2] == "" {
+			rr.CloneURL.Path = "go-pkg/" + v[3]
+		} else {
+			rr.CloneURL.Path = v[2] + "/" + v[3]
+		}
+		rr.Base = v[1]
+		rr.RelPkg = strings.TrimPrefix(v[6], "/")
+		rr.VCS = []string{"git"}
+
+		return
+	//case gpinOldRegex.MatchString(path):
+
+	case bbRegex.MatchString(path):
+		v := bbRegex.FindStringSubmatch(path)
+
+		rr.CloneURL.Host = "bitbucket.org"
+		rr.CloneURL.Path = v[2]
+		rr.Base = v[1]
+		rr.RelPkg = strings.TrimPrefix(v[3], "/")
+		rr.VCS = []string{"git", "hg"}
+
+		return
+
+	//case gcRegex.MatchString(path):
+	//v := gcRegex.FindStringSubmatch(path)
+
+	//rr.CloneURL.Host = "code.google.com"
+	//rr.CloneURL.Path = "p/" + v[2]
+	//rr.Base = v[1]
+	//rr.RelPkg = strings.TrimPrefix(v[5], "/")
+	//rr.VCS = []string{"hg", "git"}
+
+	//return
+
+	case lpRegex.MatchString(path):
+		// TODO(sdboyer) lp handling is nasty - there's ambiguities which can only really
+		// be resolved with a metadata request. See https://github.com/golang/go/issues/11436
+		v := lpRegex.FindStringSubmatch(path)
+
+		rr.CloneURL.Host = "launchpad.net"
+		rr.CloneURL.Path = v[2]
+		rr.Base = v[1]
+		rr.RelPkg = strings.TrimPrefix(v[3], "/")
+		rr.VCS = []string{"bzr"}
+
+		return
+
+	case glpRegex.MatchString(path):
+		// TODO(sdboyer) same ambiguity issues as with normal bzr lp
+		v := glpRegex.FindStringSubmatch(path)
+
+		rr.CloneURL.Host = "git.launchpad.net"
+		rr.CloneURL.Path = v[2]
+		rr.Base = v[1]
+		rr.RelPkg = strings.TrimPrefix(v[3], "/")
+		rr.VCS = []string{"git"}
+
+		return
+
+	case jazzRegex.MatchString(path):
+		v := jazzRegex.FindStringSubmatch(path)
+
+		rr.CloneURL.Host = "hub.jazz.net"
+		rr.CloneURL.Path = v[2]
+		rr.Base = v[1]
+		rr.RelPkg = strings.TrimPrefix(v[3], "/")
+		rr.VCS = []string{"git"}
+
+		return
+
+	case apacheRegex.MatchString(path):
+		v := apacheRegex.FindStringSubmatch(path)
+
+		rr.CloneURL.Host = "git.apache.org"
+		rr.CloneURL.Path = v[2]
+		rr.Base = v[1]
+		rr.RelPkg = strings.TrimPrefix(v[3], "/")
+		rr.VCS = []string{"git"}
+
+		return
+
+	// try the general syntax
+	case genericRegex.MatchString(path):
+		v := genericRegex.FindStringSubmatch(path)
+		switch v[5] {
+		case "git", "hg", "bzr":
+			x := strings.SplitN(v[1], "/", 2)
+			// TODO(sdboyer) is this actually correct for bzr?
+			rr.CloneURL.Host = x[0]
+			rr.CloneURL.Path = x[1]
+			rr.VCS = []string{v[5]}
+			rr.Base = v[1]
+			rr.RelPkg = strings.TrimPrefix(v[6], "/")
+			return
+		default:
+			return nil, fmt.Errorf("unknown repository type: %q", v[5])
+		}
+	}
+
+	// No luck so far. Maybe it's one of them vanity imports?
+	importroot, vcs, reporoot, err := parseMetadata(path)
+	if err != nil {
+		return nil, fmt.Errorf("unable to deduce repository and source type for: %q", path)
+	}
+
+	// If we got something back at all, then it supersedes the actual input for
+	// the real URL to hit
+	rr.CloneURL, err = url.Parse(reporoot)
+	if err != nil {
+		return nil, fmt.Errorf("server returned bad URL when searching for vanity import: %q", reporoot)
+	}
+
+	// We have a real URL. Set the other values and return.
+	rr.Base = importroot
+	rr.RelPkg = strings.TrimPrefix(path[len(importroot):], "/")
+
+	rr.VCS = []string{vcs}
+	if rr.CloneURL.Scheme != "" {
+		rr.Schemes = []string{rr.CloneURL.Scheme}
+	}
+
+	return rr, nil
+}
+
+// fetchMetadata fetches the remote metadata for path.
+func fetchMetadata(path string) (rc io.ReadCloser, err error) {
+	defer func() {
+		if err != nil {
+			err = fmt.Errorf("unable to determine remote metadata protocol: %s", err)
+		}
+	}()
+
+	// try https first
+	rc, err = doFetchMetadata("https", path)
+	if err == nil {
+		return
+	}
+
+	rc, err = doFetchMetadata("http", path)
+	return
+}
+
+func doFetchMetadata(scheme, path string) (io.ReadCloser, error) {
+	url := fmt.Sprintf("%s://%s?go-get=1", scheme, path)
+	switch scheme {
+	case "https", "http":
+		resp, err := http.Get(url)
+		if err != nil {
+			return nil, fmt.Errorf("failed to access url %q", url)
+		}
+		return resp.Body, nil
+	default:
+		return nil, fmt.Errorf("unknown remote protocol scheme: %q", scheme)
+	}
+}
+
+// parseMetadata fetches and decodes remote metadata for path.
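+//
+// The go-get HTML metadata protocol responds with meta tags of the form
+// (example values):
+//
+//	<meta name="go-import" content="import-prefix vcs repo-root">
+//
+// from which this function returns the prefix, vcs, and repo root strings.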
+func parseMetadata(path string) (string, string, string, error) {
+	rc, err := fetchMetadata(path)
+	if err != nil {
+		return "", "", "", err
+	}
+	defer rc.Close()
+
+	imports, err := parseMetaGoImports(rc)
+	if err != nil {
+		return "", "", "", err
+	}
+	match := -1
+	for i, im := range imports {
+		if !strings.HasPrefix(path, im.Prefix) {
+			continue
+		}
+		if match != -1 {
+			return "", "", "", fmt.Errorf("multiple meta tags match import path %q", path)
+		}
+		match = i
+	}
+	if match == -1 {
+		return "", "", "", fmt.Errorf("go-import metadata not found")
+	}
+	return imports[match].Prefix, imports[match].VCS, imports[match].RepoRoot, nil
+}
diff --git a/vendor/github.com/sdboyer/gps/remote_test.go b/vendor/github.com/sdboyer/gps/remote_test.go
new file mode 100644
index 0000000..17de00f
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/remote_test.go
@@ -0,0 +1,478 @@
+package gps
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"testing"
+)
+
+func TestDeduceRemotes(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping remote deduction test in short mode")
+	}
+
+	fixtures := []struct {
+		path string
+		want *remoteRepo
+	}{
+		{
+			"github.com/sdboyer/gps",
+			&remoteRepo{
+				Base:   "github.com/sdboyer/gps",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "sdboyer/gps",
+				},
+				Schemes: nil,
+				VCS:     []string{"git"},
+			},
+		},
+		{
+			"github.com/sdboyer/gps/foo",
+			&remoteRepo{
+				Base:   "github.com/sdboyer/gps",
+				RelPkg: "foo",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "sdboyer/gps",
+				},
+				Schemes: nil,
+				VCS:     []string{"git"},
+			},
+		},
+		{
+			"git@github.com:sdboyer/gps",
+			&remoteRepo{
+				Base:   "github.com/sdboyer/gps",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Scheme: "ssh",
+					User:   url.User("git"),
+					Host:   "github.com",
+					Path:   "sdboyer/gps",
+				},
+				Schemes: []string{"ssh"},
+				VCS:     []string{"git"},
+			},
+		},
+		{
+			"https://github.com/sdboyer/gps/foo",
+			&remoteRepo{
+				Base:   "github.com/sdboyer/gps",
+				RelPkg: "foo",
+				CloneURL: &url.URL{
+					Scheme: "https",
+					Host:   "github.com",
+					Path:   "sdboyer/gps",
+				},
+				Schemes: []string{"https"},
+				VCS:     []string{"git"},
+			},
+		},
+		{
+			"https://github.com/sdboyer/gps/foo/bar",
+			&remoteRepo{
+				Base:   "github.com/sdboyer/gps",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Scheme: "https",
+					Host:   "github.com",
+					Path:   "sdboyer/gps",
+				},
+				Schemes: []string{"https"},
+				VCS:     []string{"git"},
+			},
+		},
+		// some invalid github username patterns
+		{
+			"github.com/-sdboyer/gps/foo",
+			nil,
+		},
+		{
+			"github.com/sdboyer-/gps/foo",
+			nil,
+		},
+		{
+			"github.com/sdbo.yer/gps/foo",
+			nil,
+		},
+		{
+			"github.com/sdbo_yer/gps/foo",
+			nil,
+		},
+		{
+			"gopkg.in/sdboyer/gps.v0",
+			&remoteRepo{
+				Base:   "gopkg.in/sdboyer/gps.v0",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "sdboyer/gps",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"gopkg.in/sdboyer/gps.v0/foo",
+			&remoteRepo{
+				Base:   "gopkg.in/sdboyer/gps.v0",
+				RelPkg: "foo",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "sdboyer/gps",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"gopkg.in/sdboyer/gps.v0/foo/bar",
+			&remoteRepo{
+				Base:   "gopkg.in/sdboyer/gps.v0",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "sdboyer/gps",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"gopkg.in/yaml.v1",
+			&remoteRepo{
+				Base:   "gopkg.in/yaml.v1",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "go-pkg/yaml",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"gopkg.in/yaml.v1/foo/bar",
+			&remoteRepo{
+				Base:   "gopkg.in/yaml.v1",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "go-pkg/yaml",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			// gopkg.in only allows specifying major version in import path
+			"gopkg.in/yaml.v1.2",
+			nil,
+		},
+		// IBM hub devops services - fixtures borrowed from go get
+		{
+			"hub.jazz.net/git/user1/pkgname",
+			&remoteRepo{
+				Base:   "hub.jazz.net/git/user1/pkgname",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "hub.jazz.net",
+					Path: "git/user1/pkgname",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule",
+			&remoteRepo{
+				Base:   "hub.jazz.net/git/user1/pkgname",
+				RelPkg: "submodule/submodule/submodule",
+				CloneURL: &url.URL{
+					Host: "hub.jazz.net",
+					Path: "git/user1/pkgname",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"hub.jazz.net",
+			nil,
+		},
+		{
+			"hub2.jazz.net",
+			nil,
+		},
+		{
+			"hub.jazz.net/someotherprefix",
+			nil,
+		},
+		{
+			"hub.jazz.net/someotherprefix/user1/pkgname",
+			nil,
+		},
+		// Spaces are not valid in user names or package names
+		{
+			"hub.jazz.net/git/User 1/pkgname",
+			nil,
+		},
+		{
+			"hub.jazz.net/git/user1/pkg name",
+			nil,
+		},
+		// Dots are not valid in user names
+		{
+			"hub.jazz.net/git/user.1/pkgname",
+			nil,
+		},
+		{
+			"hub.jazz.net/git/user/pkg.name",
+			&remoteRepo{
+				Base:   "hub.jazz.net/git/user/pkg.name",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "hub.jazz.net",
+					Path: "git/user/pkg.name",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		// User names cannot have uppercase letters
+		{
+			"hub.jazz.net/git/USER/pkgname",
+			nil,
+		},
+		{
+			"bitbucket.org/sdboyer/reporoot",
+			&remoteRepo{
+				Base:   "bitbucket.org/sdboyer/reporoot",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "bitbucket.org",
+					Path: "sdboyer/reporoot",
+				},
+				VCS: []string{"git", "hg"},
+			},
+		},
+		{
+			"bitbucket.org/sdboyer/reporoot/foo/bar",
+			&remoteRepo{
+				Base:   "bitbucket.org/sdboyer/reporoot",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Host: "bitbucket.org",
+					Path: "sdboyer/reporoot",
+				},
+				VCS: []string{"git", "hg"},
+			},
+		},
+		{
+			"https://bitbucket.org/sdboyer/reporoot/foo/bar",
+			&remoteRepo{
+				Base:   "bitbucket.org/sdboyer/reporoot",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Scheme: "https",
+					Host:   "bitbucket.org",
+					Path:   "sdboyer/reporoot",
+				},
+				Schemes: []string{"https"},
+				VCS:     []string{"git", "hg"},
+			},
+		},
+		{
+			"launchpad.net/govcstestbzrrepo",
+			&remoteRepo{
+				Base:   "launchpad.net/govcstestbzrrepo",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "launchpad.net",
+					Path: "govcstestbzrrepo",
+				},
+				VCS: []string{"bzr"},
+			},
+		},
+		{
+			"launchpad.net/govcstestbzrrepo/foo/bar",
+			&remoteRepo{
+				Base:   "launchpad.net/govcstestbzrrepo",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Host: "launchpad.net",
+					Path: "govcstestbzrrepo",
+				},
+				VCS: []string{"bzr"},
+			},
+		},
+		{
+			"launchpad.net/repo root",
+			nil,
+		},
+		{
+			"git.launchpad.net/reporoot",
+			&remoteRepo{
+				Base:   "git.launchpad.net/reporoot",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "git.launchpad.net",
+					Path: "reporoot",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"git.launchpad.net/reporoot/foo/bar",
+			&remoteRepo{
+				Base:   "git.launchpad.net/reporoot",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Host: "git.launchpad.net",
+					Path: "reporoot",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"git.launchpad.net/repo root",
+			nil,
+		},
+		{
+			"git.apache.org/package-name.git",
+			&remoteRepo{
+				Base:   "git.apache.org/package-name.git",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "git.apache.org",
+					Path: "package-name.git",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		{
+			"git.apache.org/package-name.git/foo/bar",
+			&remoteRepo{
+				Base:   "git.apache.org/package-name.git",
+				RelPkg: "foo/bar",
+				CloneURL: &url.URL{
+					Host: "git.apache.org",
+					Path: "package-name.git",
+				},
+				VCS: []string{"git"},
+			},
+		},
+		// Vanity imports
+		{
+			"golang.org/x/exp",
+			&remoteRepo{
+				Base:   "golang.org/x/exp",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Scheme: "https",
+					Host:   "go.googlesource.com",
+					Path:   "/exp",
+				},
+				Schemes: []string{"https"},
+				VCS:     []string{"git"},
+			},
+		},
+		{
+			"golang.org/x/exp/inotify",
+			&remoteRepo{
+				Base:   "golang.org/x/exp",
+				RelPkg: "inotify",
+				CloneURL: &url.URL{
+					Scheme: "https",
+					Host:   "go.googlesource.com",
+					Path:   "/exp",
+				},
+				Schemes: []string{"https"},
+				VCS:     []string{"git"},
+			},
+		},
+		{
+			"rsc.io/pdf",
+			&remoteRepo{
+				Base:   "rsc.io/pdf",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Scheme: "https",
+					Host:   "github.com",
+					Path:   "/rsc/pdf",
+				},
+				Schemes: []string{"https"},
+				VCS:     []string{"git"},
+			},
+		},
+		// Regression - gh does allow two-letter usernames
+		{
+			"github.com/kr/pretty",
+			&remoteRepo{
+				Base:   "github.com/kr/pretty",
+				RelPkg: "",
+				CloneURL: &url.URL{
+					Host: "github.com",
+					Path: "kr/pretty",
+				},
+				Schemes: nil,
+				VCS:     []string{"git"},
+			},
+		},
+	}
+
+	for _, fix := range fixtures {
+		got, err := deduceRemoteRepo(fix.path)
+		want := fix.want
+
+		if want == nil {
+			if err == nil {
+				t.Errorf("deduceRemoteRepo(%q): Error expected but not received", fix.path)
+			}
+			continue
+		}
+
+		if err != nil {
+			t.Errorf("deduceRemoteRepo(%q): %v", fix.path, err)
+			continue
+		}
+
+		if got.Base != want.Base {
+			t.Errorf("deduceRemoteRepo(%q): Base was %s, wanted %s", fix.path, got.Base, want.Base)
+		}
+		if got.RelPkg != want.RelPkg {
+			t.Errorf("deduceRemoteRepo(%q): RelPkg was %s, wanted %s", fix.path, got.RelPkg, want.RelPkg)
+		}
+		if !reflect.DeepEqual(got.CloneURL, want.CloneURL) {
+			// misspelling things is cool when it makes columns line up
+			t.Errorf("deduceRemoteRepo(%q): CloneURL disagreement:\n(GOT) %s\n(WNT) %s", fix.path, ufmt(got.CloneURL), ufmt(want.CloneURL))
+		}
+		if !reflect.DeepEqual(got.VCS, want.VCS) {
+			t.Errorf("deduceRemoteRepo(%q): VCS was %s, wanted %s", fix.path, got.VCS, want.VCS)
+		}
+		if !reflect.DeepEqual(got.Schemes, want.Schemes) {
+			t.Errorf("deduceRemoteRepo(%q): Schemes was %s, wanted %s", fix.path, got.Schemes, want.Schemes)
+		}
+	}
+}
+
+// ufmt is borrowed from the stdlib; it renders a URL as a string that is more
+// useful for debugging than fmt's struct printer.
+func ufmt(u *url.URL) string {
+	var user, pass interface{}
+	if u.User != nil {
+		user = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			pass = p
+		}
+	}
+	return fmt.Sprintf("host=%q, path=%q, opaque=%q, scheme=%q, user=%#v, pass=%#v, rawpath=%q, rawq=%q, frag=%q",
+		u.Host, u.Path, u.Opaque, u.Scheme, user, pass, u.RawPath, u.RawQuery, u.Fragment)
+}
diff --git a/vendor/github.com/sdboyer/gps/remove_go16.go b/vendor/github.com/sdboyer/gps/remove_go16.go
new file mode 100644
index 0000000..8c7844d
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/remove_go16.go
@@ -0,0 +1,38 @@
+// +build !go1.7
+
+package gps
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// removeAll removes path and any children it contains. It deals correctly
+// with removal on Windows where, prior to Go 1.7, os.RemoveAll could fail on
+// files that were set to read-only.
+func removeAll(path string) error {
+	// Only need special handling for windows
+	if runtime.GOOS != "windows" {
+		return os.RemoveAll(path)
+	}
+
+	// Simple case: if Remove works, we're done.
+	err := os.Remove(path)
+	if err == nil || os.IsNotExist(err) {
+		return nil
+	}
+
+	// make sure all files are writable so we can delete them
+	return filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			// walk gave us some error, give it back.
+			return err
+		}
+		mode := info.Mode()
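+		// 0200 is the owner-write permission bit; if the mode already
+		// includes it, the file is already deletable and no chmod is
+		// needed.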
+		if mode|0200 == mode {
+			return nil
+		}
+		return os.Chmod(path, mode|0200)
+	})
+}
diff --git a/vendor/github.com/sdboyer/gps/remove_go17.go b/vendor/github.com/sdboyer/gps/remove_go17.go
new file mode 100644
index 0000000..59c19a6
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/remove_go17.go
@@ -0,0 +1,11 @@
+// +build go1.7
+
+package gps
+
+import "os"
+
+// go1.7 and later deal with the file perms issue in os.RemoveAll(), so our
+// workaround is no longer necessary.
+func removeAll(path string) error {
+	return os.RemoveAll(path)
+}
diff --git a/vendor/github.com/sdboyer/gps/result.go b/vendor/github.com/sdboyer/gps/result.go
new file mode 100644
index 0000000..e601de9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/result.go
@@ -0,0 +1,73 @@
+package gps
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+)
+
+// A Solution is returned by a solver run. It is mostly just a Lock, with some
+// additional methods that report information about the solve run.
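+//
+// A Solution is typically obtained from a solver run (e.g. a Solver's Solve
+// method) and can be materialized on disk with CreateVendorTree.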
+type Solution interface {
+	Lock
+	Attempts() int
+}
+
+type solution struct {
+	// A list of the projects selected by the solver.
+	p []LockedProject
+
+	// The number of solutions that were attempted
+	att int
+
+	// The hash digest of the input opts
+	hd []byte
+}
+
+// CreateVendorTree takes a basedir and a Lock, and exports all the projects
+// listed in the lock to the appropriate target location within the basedir.
+//
+// It requires a SourceManager to do the work, and takes a flag indicating
+// whether or not to strip vendor directories contained in the exported
+// dependencies.
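+//
+// A minimal sketch of a call site (assuming sm is a live SourceManager and r
+// is a Solution produced by a solver run):
+//
+//	err := CreateVendorTree(filepath.Join(projectRoot, "vendor"), r, sm, true)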
+func CreateVendorTree(basedir string, l Lock, sm SourceManager, sv bool) error {
+	err := os.MkdirAll(basedir, 0777)
+	if err != nil {
+		return err
+	}
+
+	// TODO(sdboyer) parallelize
+	for _, p := range l.Projects() {
+		to := path.Join(basedir, string(p.Ident().ProjectRoot))
+
+		err := os.MkdirAll(to, 0777)
+		if err != nil {
+			return err
+		}
+
+		err = sm.ExportProject(p.Ident().ProjectRoot, p.Version(), to)
+		if err != nil {
+			removeAll(basedir)
+			return fmt.Errorf("Error while exporting %s: %s", p.Ident().ProjectRoot, err)
+		}
+		if sv {
+			filepath.Walk(to, stripVendor)
+		}
+		// TODO(sdboyer) dump version metadata file
+	}
+
+	return nil
+}
+
+func (r solution) Projects() []LockedProject {
+	return r.p
+}
+
+func (r solution) Attempts() int {
+	return r.att
+}
+
+func (r solution) InputHash() []byte {
+	return r.hd
+}
diff --git a/vendor/github.com/sdboyer/gps/result_test.go b/vendor/github.com/sdboyer/gps/result_test.go
new file mode 100644
index 0000000..1aed83b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/result_test.go
@@ -0,0 +1,107 @@
+package gps
+
+import (
+	"os"
+	"path"
+	"testing"
+)
+
+var basicResult solution
+var kub atom
+
+func pi(n string) ProjectIdentifier {
+	return ProjectIdentifier{
+		ProjectRoot: ProjectRoot(n),
+	}
+}
+
+func init() {
+	basicResult = solution{
+		att: 1,
+		p: []LockedProject{
+			pa2lp(atom{
+				id: pi("github.com/sdboyer/testrepo"),
+				v:  NewBranch("master").Is(Revision("4d59fb584b15a94d7401e356d2875c472d76ef45")),
+			}, nil),
+			pa2lp(atom{
+				id: pi("github.com/Masterminds/VCSTestRepo"),
+				v:  NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
+			}, nil),
+		},
+	}
+
+	// just in case something needs punishing, kubernetes is happy to oblige
+	kub = atom{
+		id: pi("github.com/kubernetes/kubernetes"),
+		v:  NewVersion("1.0.0").Is(Revision("528f879e7d3790ea4287687ef0ab3f2a01cc2718")),
+	}
+}
+
+func TestResultCreateVendorTree(t *testing.T) {
+	// This test is a bit slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping vendor tree creation test in short mode")
+	}
+
+	r := basicResult
+
+	tmp := path.Join(os.TempDir(), "vsolvtest")
+	os.RemoveAll(tmp)
+
+	sm, err := NewSourceManager(naiveAnalyzer{}, path.Join(tmp, "cache"), false)
+	if err != nil {
+		t.Errorf("NewSourceManager errored unexpectedly: %q", err)
+	}
+
+	err = CreateVendorTree(path.Join(tmp, "export"), r, sm, true)
+	if err != nil {
+		t.Errorf("Unexpected error while creating vendor tree: %s", err)
+	}
+
+	// TODO(sdboyer) add more checks
+}
+
+func BenchmarkCreateVendorTree(b *testing.B) {
+	// We're fs-bound here, so restrict to single parallelism
+	b.SetParallelism(1)
+
+	r := basicResult
+	tmp := path.Join(os.TempDir(), "vsolvtest")
+
+	clean := true
+	sm, err := NewSourceManager(naiveAnalyzer{}, path.Join(tmp, "cache"), true)
+	if err != nil {
+		b.Errorf("NewSourceManager errored unexpectedly: %q", err)
+		clean = false
+	}
+
+	// Prefetch the projects before timer starts
+	for _, lp := range r.p {
+		_, _, err := sm.GetProjectInfo(lp.Ident().ProjectRoot, lp.Version())
+		if err != nil {
+			b.Errorf("failed getting project info during prefetch: %s", err)
+			clean = false
+		}
+	}
+
+	if clean {
+		b.ResetTimer()
+		b.StopTimer()
+		exp := path.Join(tmp, "export")
+		for i := 0; i < b.N; i++ {
+			// Order the loop this way to make it easy to disable final cleanup, to
+			// ease manual inspection
+			os.RemoveAll(exp)
+			b.StartTimer()
+			err = CreateVendorTree(exp, r, sm, true)
+			b.StopTimer()
+			if err != nil {
+				b.Errorf("unexpected error after %v iterations: %s", i, err)
+				break
+			}
+		}
+	}
+
+	sm.Release()
+	os.RemoveAll(tmp) // comment this to leave temp dir behind for inspection
+}
diff --git a/vendor/github.com/sdboyer/gps/satisfy.go b/vendor/github.com/sdboyer/gps/satisfy.go
new file mode 100644
index 0000000..8c99f47
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/satisfy.go
@@ -0,0 +1,316 @@
+package gps
+
+// checkProject performs all constraint checks on a new project (with packages)
+// that we want to select. It determines if selecting the atom would result in
+// a state where all solver requirements are still satisfied.
+func (s *solver) checkProject(a atomWithPackages) error {
+	pa := a.a
+	if nilpa == pa {
+		// This shouldn't be able to happen, but if it does, it unequivocally
+		// indicates a logical bug somewhere, so blowing up is preferable
+		panic("canary - checking version of empty ProjectAtom")
+	}
+
+	if err := s.checkAtomAllowable(pa); err != nil {
+		s.logSolve(err)
+		return err
+	}
+
+	if err := s.checkRequiredPackagesExist(a); err != nil {
+		s.logSolve(err)
+		return err
+	}
+
+	deps, err := s.getImportsAndConstraintsOf(a)
+	if err != nil {
+		// An err here would be from the package fetcher; pass it straight back
+		// TODO(sdboyer) can we logSolve this?
+		return err
+	}
+
+	for _, dep := range deps {
+		if err := s.checkIdentMatches(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+		if err := s.checkDepsConstraintsAllowable(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+		if err := s.checkDepsDisallowsSelected(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+		// TODO(sdboyer) decide how to refactor in order to re-enable this. Checking for
+		// revision existence is important...but kinda obnoxious.
+		//if err := s.checkRevisionExists(a, dep); err != nil {
+		//s.logSolve(err)
+		//return err
+		//}
+		if err := s.checkPackageImportsFromDepExist(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+
+		// TODO(sdboyer) add check that fails if adding this atom would create a loop
+	}
+
+	return nil
+}
+
+// checkPackage performs all constraint checks for new packages being added to
+// an already-selected project. It determines if selecting the packages would
+// result in a state where all solver requirements are still satisfied.
+func (s *solver) checkPackage(a atomWithPackages) error {
+	if nilpa == a.a {
+		// This shouldn't be able to happen, but if it does, it unequivocally
+		// indicates a logical bug somewhere, so blowing up is preferable
+		panic("canary - checking version of empty ProjectAtom")
+	}
+
+	// The base atom was already validated, so we can skip the
+	// checkAtomAllowable step.
+	deps, err := s.getImportsAndConstraintsOf(a)
+	if err != nil {
+		// An err here would be from the package fetcher; pass it straight back
+		// TODO(sdboyer) can we logSolve this?
+		return err
+	}
+
+	for _, dep := range deps {
+		if err := s.checkIdentMatches(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+		if err := s.checkDepsConstraintsAllowable(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+		if err := s.checkDepsDisallowsSelected(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+		// TODO(sdboyer) decide how to refactor in order to re-enable this. Checking for
+		// revision existence is important...but kinda obnoxious.
+		//if err := s.checkRevisionExists(a, dep); err != nil {
+		//s.logSolve(err)
+		//return err
+		//}
+		if err := s.checkPackageImportsFromDepExist(a, dep); err != nil {
+			s.logSolve(err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+// checkAtomAllowable ensures that an atom itself is acceptable with respect to
+// the constraints established by the current solution.
+func (s *solver) checkAtomAllowable(pa atom) error {
+	constraint := s.sel.getConstraint(pa.id)
+	if s.b.matches(pa.id, constraint, pa.v) {
+		return nil
+	}
+	// TODO(sdboyer) collect constraint failure reason (wait...aren't we, below?)
+
+	deps := s.sel.getDependenciesOn(pa.id)
+	var failparent []dependency
+	for _, dep := range deps {
+		if !s.b.matches(pa.id, dep.dep.Constraint, pa.v) {
+			s.fail(dep.depender.id)
+			failparent = append(failparent, dep)
+		}
+	}
+
+	err := &versionNotAllowedFailure{
+		goal:       pa,
+		failparent: failparent,
+		c:          constraint,
+	}
+
+	return err
+}
+
+// checkRequiredPackagesExist ensures that all required packages enumerated by
+// existing dependencies on this atom are actually present in the atom.
+func (s *solver) checkRequiredPackagesExist(a atomWithPackages) error {
+	ptree, err := s.b.listPackages(a.a.id, a.a.v)
+	if err != nil {
+		// TODO(sdboyer) handle this more gracefully
+		return err
+	}
+
+	deps := s.sel.getDependenciesOn(a.a.id)
+	fp := make(map[string]errDeppers)
+	// We inspect these in a bit of a roundabout way, in order to incrementally
+	// build up the failure we'd return if there is, indeed, a missing package.
+	// TODO(sdboyer) rechecking all of these every time is wasteful. Is there a shortcut?
+	for _, dep := range deps {
+		for _, pkg := range dep.dep.pl {
+			if errdep, seen := fp[pkg]; seen {
+				errdep.deppers = append(errdep.deppers, dep.depender)
+				fp[pkg] = errdep
+			} else {
+				perr, has := ptree.Packages[pkg]
+				if !has || perr.Err != nil {
+					fp[pkg] = errDeppers{
+						err:     perr.Err,
+						deppers: []atom{dep.depender},
+					}
+				}
+			}
+		}
+	}
+
+	if len(fp) > 0 {
+		return &checkeeHasProblemPackagesFailure{
+			goal:    a.a,
+			failpkg: fp,
+		}
+	}
+	return nil
+}
+
+// checkDepsConstraintsAllowable checks that the constraints of an atom on a
+// given dep are valid with respect to existing constraints.
+func (s *solver) checkDepsConstraintsAllowable(a atomWithPackages, cdep completeDep) error {
+	dep := cdep.ProjectConstraint
+	constraint := s.sel.getConstraint(dep.Ident)
+	// Ensure the constraint expressed by the dep has at least some possible
+	// intersection with the intersection of existing constraints.
+	if s.b.matchesAny(dep.Ident, constraint, dep.Constraint) {
+		return nil
+	}
+
+	siblings := s.sel.getDependenciesOn(dep.Ident)
+	// No admissible versions - visit all siblings and identify the disagreement(s)
+	var failsib []dependency
+	var nofailsib []dependency
+	for _, sibling := range siblings {
+		if !s.b.matchesAny(dep.Ident, sibling.dep.Constraint, dep.Constraint) {
+			s.fail(sibling.depender.id)
+			failsib = append(failsib, sibling)
+		} else {
+			nofailsib = append(nofailsib, sibling)
+		}
+	}
+
+	return &disjointConstraintFailure{
+		goal:      dependency{depender: a.a, dep: cdep},
+		failsib:   failsib,
+		nofailsib: nofailsib,
+		c:         constraint,
+	}
+}
+
+// checkDepsDisallowsSelected ensures that an atom's constraints on a particular
+// dep are not incompatible with the version of that dep that's already been
+// selected.
+func (s *solver) checkDepsDisallowsSelected(a atomWithPackages, cdep completeDep) error {
+	dep := cdep.ProjectConstraint
+	selected, exists := s.sel.selected(dep.Ident)
+	if exists && !s.b.matches(dep.Ident, dep.Constraint, selected.a.v) {
+		s.fail(dep.Ident)
+
+		return &constraintNotAllowedFailure{
+			goal: dependency{depender: a.a, dep: cdep},
+			v:    selected.a.v,
+		}
+	}
+	return nil
+}
+
+// checkIdentMatches ensures that the LocalName of a dep introduced by an atom,
+// has the same NetworkName as what's already been selected (assuming anything's
+// been selected).
+//
+// In other words, this ensures that the solver never simultaneously selects two
+// identifiers with the same local name, but that disagree about where their
+// network source is.
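+//
+// For example (hypothetical names), two deps may agree on the root
+// "github.com/foo/bar" but disagree on its network source - say, one
+// pointing at a fork. Those two can never be simultaneously satisfied.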
+func (s *solver) checkIdentMatches(a atomWithPackages, cdep completeDep) error {
+	dep := cdep.ProjectConstraint
+	if cur, exists := s.names[dep.Ident.ProjectRoot]; exists {
+		if cur != dep.Ident.netName() {
+			deps := s.sel.getDependenciesOn(a.a.id)
+			// Fail all the other deps, as there's no way atom can ever be
+			// compatible with them
+			for _, d := range deps {
+				s.fail(d.depender.id)
+			}
+
+			return &sourceMismatchFailure{
+				shared:   dep.Ident.ProjectRoot,
+				sel:      deps,
+				current:  cur,
+				mismatch: dep.Ident.netName(),
+				prob:     a.a,
+			}
+		}
+	}
+
+	return nil
+}
+
+// checkPackageImportsFromDepExist ensures that, if the dep is already selected,
+// the newly-required set of packages being placed on it exist and are valid.
+func (s *solver) checkPackageImportsFromDepExist(a atomWithPackages, cdep completeDep) error {
+	sel, is := s.sel.selected(cdep.ProjectConstraint.Ident)
+	if !is {
+		// dep is not already selected; nothing to do
+		return nil
+	}
+
+	ptree, err := s.b.listPackages(sel.a.id, sel.a.v)
+	if err != nil {
+		// TODO(sdboyer) handle this more gracefully
+		return err
+	}
+
+	e := &depHasProblemPackagesFailure{
+		goal: dependency{
+			depender: a.a,
+			dep:      cdep,
+		},
+		v:    sel.a.v,
+		prob: make(map[string]error),
+	}
+
+	for _, pkg := range cdep.pl {
+		perr, has := ptree.Packages[pkg]
+		if !has || perr.Err != nil {
+			e.pl = append(e.pl, pkg)
+			if has {
+				e.prob[pkg] = perr.Err
+			}
+		}
+	}
+
+	if len(e.pl) > 0 {
+		return e
+	}
+	return nil
+}
+
+// checkRevisionExists ensures that if a dependency is constrained by a
+// revision, that that revision actually exists.
+func (s *solver) checkRevisionExists(a atomWithPackages, cdep completeDep) error {
+	r, isrev := cdep.Constraint.(Revision)
+	if !isrev {
+		// Constraint is not a revision; nothing to do
+		return nil
+	}
+
+	present, _ := s.b.revisionPresentIn(cdep.Ident, r)
+	if present {
+		return nil
+	}
+
+	return &nonexistentRevisionFailure{
+		goal: dependency{
+			depender: a.a,
+			dep:      cdep,
+		},
+		r: r,
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/selection.go b/vendor/github.com/sdboyer/gps/selection.go
new file mode 100644
index 0000000..6d84643
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/selection.go
@@ -0,0 +1,198 @@
+package gps
+
+type selection struct {
+	projects []selected
+	deps     map[ProjectIdentifier][]dependency
+	sm       sourceBridge
+}
+
+type selected struct {
+	a     atomWithPackages
+	first bool
+}
+
+func (s *selection) getDependenciesOn(id ProjectIdentifier) []dependency {
+	if deps, exists := s.deps[id]; exists {
+		return deps
+	}
+
+	return nil
+}
+
+// pushSelection pushes a new atomWithPackages onto the selection stack, along
+// with an indicator as to whether this selection indicates a new project *and*
+// packages, or merely some new packages on a project that was already selected.
+func (s *selection) pushSelection(a atomWithPackages, first bool) {
+	s.projects = append(s.projects, selected{
+		a:     a,
+		first: first,
+	})
+}
+
+// popSelection removes and returns the last atomWithPackages from the selection
+// stack, along with an indication of whether that element was the first from
+// that project - that is, if it represented an addition of both a project and
+// one or more packages to the overall selection.
+func (s *selection) popSelection() (atomWithPackages, bool) {
+	var sel selected
+	sel, s.projects = s.projects[len(s.projects)-1], s.projects[:len(s.projects)-1]
+	return sel.a, sel.first
+}
+
+func (s *selection) pushDep(dep dependency) {
+	s.deps[dep.dep.Ident] = append(s.deps[dep.dep.Ident], dep)
+}
+
+func (s *selection) popDep(id ProjectIdentifier) (dep dependency) {
+	deps := s.deps[id]
+	dep, s.deps[id] = deps[len(deps)-1], deps[:len(deps)-1]
+	return dep
+}
+
+func (s *selection) depperCount(id ProjectIdentifier) int {
+	return len(s.deps[id])
+}
+
+func (s *selection) setDependenciesOn(id ProjectIdentifier, deps []dependency) {
+	s.deps[id] = deps
+}
+
+// Compute a list of the unique packages within the given ProjectIdentifier that
+// have dependers, and the number of dependers they have.
+func (s *selection) getRequiredPackagesIn(id ProjectIdentifier) map[string]int {
+	// TODO(sdboyer) this is horribly inefficient to do on the fly; we need a method to
+	// precompute it on pushing a new dep, and preferably with an immutable
+	// structure so that we can pop with zero cost.
+	uniq := make(map[string]int)
+	for _, dep := range s.deps[id] {
+		for _, pkg := range dep.dep.pl {
+			if count, has := uniq[pkg]; has {
+				count++
+				uniq[pkg] = count
+			} else {
+				uniq[pkg] = 1
+			}
+		}
+	}
+
+	return uniq
+}
+
+// Compute a list of the unique packages within the given ProjectIdentifier that
+// are currently selected, and the number of times each package has been
+// independently selected.
+func (s *selection) getSelectedPackagesIn(id ProjectIdentifier) map[string]int {
+	// TODO(sdboyer) this is horribly inefficient to do on the fly; we need a method to
+	// precompute it on pushing a new dep, and preferably with an immutable
+	// structure so that we can pop with zero cost.
+	uniq := make(map[string]int)
+	for _, p := range s.projects {
+		if p.a.a.id.eq(id) {
+			for _, pkg := range p.a.pl {
+				if count, has := uniq[pkg]; has {
+					count++
+					uniq[pkg] = count
+				} else {
+					uniq[pkg] = 1
+				}
+			}
+		}
+	}
+
+	return uniq
+}
+
+func (s *selection) getConstraint(id ProjectIdentifier) Constraint {
+	deps, exists := s.deps[id]
+	if !exists || len(deps) == 0 {
+		return any
+	}
+
+	// TODO(sdboyer) recomputing this sucks and is quite wasteful. Precompute/cache it
+	// on changes to the constraint set, instead.
+
+	// The solver itself is expected to maintain the invariant that all the
+	// constraints kept here collectively admit a non-empty set of versions. We
+	// assume this is the case here while assembling a composite constraint.
+
+	// Start with the open set
+	var ret Constraint = any
+	for _, dep := range deps {
+		ret = s.sm.intersect(id, ret, dep.dep.Constraint)
+	}
+
+	return ret
+}
+
+// selected checks to see if the given ProjectIdentifier has been selected, and
+// if so, returns the corresponding atomWithPackages.
+//
+// It walks the projects selection list from front to back and returns the first
+// match it finds, which means it will always and only return the base selection
+// of the project, without any additional package selections that may or may not
+// have happened later.
+func (s *selection) selected(id ProjectIdentifier) (atomWithPackages, bool) {
+	for _, p := range s.projects {
+		if p.a.a.id.eq(id) {
+			return p.a, true
+		}
+	}
+
+	return atomWithPackages{a: nilpa}, false
+}
+
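+// unselected is a priority queue of bimodalIdentifiers, with the comparison
+// function injected via cmp. Its pointer type satisfies
+// container/heap.Interface.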
+type unselected struct {
+	sl  []bimodalIdentifier
+	cmp func(i, j int) bool
+}
+
+func (u unselected) Len() int {
+	return len(u.sl)
+}
+
+func (u unselected) Less(i, j int) bool {
+	return u.cmp(i, j)
+}
+
+func (u unselected) Swap(i, j int) {
+	u.sl[i], u.sl[j] = u.sl[j], u.sl[i]
+}
+
+func (u *unselected) Push(x interface{}) {
+	u.sl = append(u.sl, x.(bimodalIdentifier))
+}
+
+func (u *unselected) Pop() (v interface{}) {
+	v, u.sl = u.sl[len(u.sl)-1], u.sl[:len(u.sl)-1]
+	return v
+}
+
+// remove takes a ProjectIdentifier out of the priority queue, if present.
+//
+// There are, generally, two ways this gets called: to remove the unselected
+// item from the front of the queue while that item is being unselected, and
+// during backtracking, when an item becomes unnecessary because the item that
+// induced it was popped off.
+//
+// The worst case for both of these is O(n), but in practice the first case
+// will be O(1), as we iterate the queue from front to back.
+func (u *unselected) remove(bmi bimodalIdentifier) {
+	for k, pi := range u.sl {
+		if pi.id.eq(bmi.id) {
+			// Simple slice comparison - assume they're both sorted the same
+			for k, pkg := range pi.pl {
+				if bmi.pl[k] != pkg {
+					break
+				}
+			}
+
+			if k == len(u.sl)-1 {
+				// if we're on the last element, just pop, no splice
+				u.sl = u.sl[:len(u.sl)-1]
+			} else {
+				u.sl = append(u.sl[:k], u.sl[k+1:]...)
+			}
+			break
+		}
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/solve_basic_test.go b/vendor/github.com/sdboyer/gps/solve_basic_test.go
new file mode 100644
index 0000000..055ecc8
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_basic_test.go
@@ -0,0 +1,1362 @@
+package gps
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/Masterminds/semver"
+)
+
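+// regfrom matches "name from source version" info strings, e.g.
+// "foo from bar 1.0.0".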
+var regfrom = regexp.MustCompile(`^(\w*) from (\w*) ([0-9\.]*)`)
+
+// nvSplit splits an "info" string on " " into the pair of name and
+// version/constraint, and returns each individually.
+//
+// This is for narrow use - panics if there are fewer than two resulting items in
+// the slice.
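+//
+// For example, "foo 1.0.0" yields the identifier foo with version "1.0.0";
+// "foo from bar 1.0.0" additionally sets the identifier's NetworkName to
+// "bar".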
+func nvSplit(info string) (id ProjectIdentifier, version string) {
+	if strings.Contains(info, " from ") {
+		parts := regfrom.FindStringSubmatch(info)
+		info = parts[1] + " " + parts[3]
+		id.NetworkName = parts[2]
+	}
+
+	s := strings.SplitN(info, " ", 2)
+	if len(s) < 2 {
+		panic(fmt.Sprintf("Malformed name/version info string '%s'", info))
+	}
+
+	id.ProjectRoot, version = ProjectRoot(s[0]), s[1]
+	if id.NetworkName == "" {
+		id.NetworkName = string(id.ProjectRoot)
+	}
+	return
+}
+
+// nvrSplit splits an "info" string on " " into the triplet of name,
+// version/constraint, and revision, and returns each individually.
+//
+// It will work fine if only name and version/constraint are provided.
+//
+// This is for narrow use - panics if there are fewer than two resulting items in
+// the slice.
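+//
+// For example, "foo 1.0.0 deadbeef" yields the revision "deadbeef" as the
+// third element.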
+func nvrSplit(info string) (id ProjectIdentifier, version string, revision Revision) {
+	if strings.Contains(info, " from ") {
+		parts := regfrom.FindStringSubmatch(info)
+		info = parts[1] + " " + parts[3]
+		id.NetworkName = parts[2]
+	}
+
+	s := strings.SplitN(info, " ", 3)
+	if len(s) < 2 {
+		panic(fmt.Sprintf("Malformed name/version info string '%s'", info))
+	}
+
+	id.ProjectRoot, version = ProjectRoot(s[0]), s[1]
+	if id.NetworkName == "" {
+		id.NetworkName = string(id.ProjectRoot)
+	}
+
+	if len(s) == 3 {
+		revision = Revision(s[2])
+	}
+	return
+}
+
+// mkAtom splits the input string on a space, and uses the first two elements as
+// the project identifier and version, respectively.
+//
+// The version segment may have a leading character indicating the type of
+// version to create:
+//
+//  p: create a "plain" (non-semver) version.
+//  b: create a branch version.
+//  r: create a revision.
+//
+// No prefix is assumed to indicate a semver version.
+//
+// If a third space-delimited element is provided, it will be interpreted as a
+// revision, and used as the underlying version in a PairedVersion. No prefix
+// should be provided in this case. It is an error (and will panic) to try to
+// pass a revision with an underlying revision.
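+//
+// For example, "foo 1.0.0" makes a semver atom, "foo bmaster" an atom on
+// branch "master", and "foo 1.0.0 foorev" pairs the semver version with the
+// underlying revision "foorev".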
+func mkAtom(info string) atom {
+	id, ver, rev := nvrSplit(info)
+
+	var v Version
+	switch ver[0] {
+	case 'r':
+		if rev != "" {
+			panic("Cannot pair a revision with a revision")
+		}
+		v = Revision(ver[1:])
+	case 'p':
+		v = NewVersion(ver[1:])
+	case 'b':
+		v = NewBranch(ver[1:])
+	default:
+		_, err := semver.NewVersion(ver)
+		if err != nil {
+			// don't want to allow bad test data at this level, so just panic
+			panic(fmt.Sprintf("Error when converting '%s' into semver: %s", ver, err))
+		}
+		v = NewVersion(ver)
+	}
+
+	if rev != "" {
+		v = v.(UnpairedVersion).Is(rev)
+	}
+
+	return atom{
+		id: id,
+		v:  v,
+	}
+}
+
+// mkPDep splits the input string on a space, and uses the first two elements
+// as the project identifier and constraint body, respectively.
+//
+// The constraint body may have a leading character indicating the type of
+// version to create:
+//
+//  p: create a "plain" (non-semver) version.
+//  b: create a branch version.
+//  r: create a revision.
+//
+// If no leading character is used, a semver constraint is assumed.
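+//
+// For example, "foo ^1.0.0" produces a semver range constraint, while
+// "foo bmaster" constrains foo to the branch "master".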
+func mkPDep(info string) ProjectConstraint {
+	id, ver, rev := nvrSplit(info)
+
+	var c Constraint
+	switch ver[0] {
+	case 'r':
+		c = Revision(ver[1:])
+	case 'p':
+		c = NewVersion(ver[1:])
+	case 'b':
+		c = NewBranch(ver[1:])
+	default:
+		// Without one of those leading characters, we know it's a proper semver
+		// expression, so use the other parser that doesn't look for a rev
+		rev = ""
+		id, ver = nvSplit(info)
+		var err error
+		c, err = NewSemverConstraint(ver)
+		if err != nil {
+			// don't want bad test data at this level, so just panic
+			panic(fmt.Sprintf("Error when converting '%s' into semver constraint: %s (full info: %s)", ver, err, info))
+		}
+	}
+
+	// There's no practical reason that a real tool would need to produce a
+	// constraint that's a PairedVersion, but it is a possibility admitted by the
+	// system, so we at least allow for it in our testing harness.
+	if rev != "" {
+		// Of course, this *will* panic if the predicate is a revision or a
+		// semver constraint, neither of which implement UnpairedVersion. This
+		// is as intended, to prevent bad data from entering the system.
+		c = c.(UnpairedVersion).Is(rev)
+	}
+
+	return ProjectConstraint{
+		Ident:      id,
+		Constraint: c,
+	}
+}
+
+// A depspec is a fixture representing all the information a SourceManager would
+// ordinarily glean directly from interrogating a repository.
+type depspec struct {
+	n       ProjectRoot
+	v       Version
+	deps    []ProjectConstraint
+	devdeps []ProjectConstraint
+	pkgs    []tpkg
+}
+
+// mkDepspec creates a depspec by processing a series of strings, each of which
+// contains an identifier and version information.
+//
+// The first string is broken out into the name and version of the package being
+// described - see the docs on mkAtom for details. Subsequent strings are
+// interpreted as dep constraints of that dep at that version. See the docs on
+// mkPDep for details.
+//
+// If a string other than the first includes a "(dev) " prefix, it will be
+// treated as a test-only dependency.
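+//
+// For example, mkDepspec("root 1.0.0", "foo >=1.0.0", "(dev) bar 1.0.0")
+// describes root 1.0.0 with a semver constraint on foo and a test-only
+// constraint on bar.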
+func mkDepspec(pi string, deps ...string) depspec {
+	pa := mkAtom(pi)
+	if string(pa.id.ProjectRoot) != pa.id.NetworkName {
+		panic("alternate source on self makes no sense")
+	}
+
+	ds := depspec{
+		n: pa.id.ProjectRoot,
+		v: pa.v,
+	}
+
+	for _, dep := range deps {
+		var sl *[]ProjectConstraint
+		if strings.HasPrefix(dep, "(dev) ") {
+			dep = strings.TrimPrefix(dep, "(dev) ")
+			sl = &ds.devdeps
+		} else {
+			sl = &ds.deps
+		}
+
+		*sl = append(*sl, mkPDep(dep))
+	}
+
+	return ds
+}
+
+// mklock makes a fixLock, suitable to act as a lock file
+func mklock(pairs ...string) fixLock {
+	l := make(fixLock, 0)
+	for _, s := range pairs {
+		pa := mkAtom(s)
+		l = append(l, NewLockedProject(pa.id.ProjectRoot, pa.v, pa.id.netName(), nil))
+	}
+
+	return l
+}
+
+// mkrevlock makes a fixLock, suitable to act as a lock file, with only a name
+// and a rev
+func mkrevlock(pairs ...string) fixLock {
+	l := make(fixLock, 0)
+	for _, s := range pairs {
+		pa := mkAtom(s)
+		l = append(l, NewLockedProject(pa.id.ProjectRoot, pa.v.(PairedVersion).Underlying(), pa.id.netName(), nil))
+	}
+
+	return l
+}
+
+// mksolution makes a result set
+func mksolution(pairs ...string) map[string]Version {
+	m := make(map[string]Version)
+	for _, pair := range pairs {
+		a := mkAtom(pair)
+		// TODO(sdboyer) identifierify
+		m[string(a.id.ProjectRoot)] = a.v
+	}
+
+	return m
+}
+
+// computeBasicReachMap takes a depspec and computes a reach map which is
+// identical to the explicit depgraph.
+//
+// Using a reachMap here is overkill for what the basic fixtures actually need,
+// but we use it anyway for congruence with the more general cases.
+func computeBasicReachMap(ds []depspec) reachMap {
+	rm := make(reachMap)
+
+	for k, d := range ds {
+		n := string(d.n)
+		lm := map[string][]string{
+			n: nil,
+		}
+		v := d.v
+		if k == 0 {
+			// Put the root in with a nil rev, to accommodate the solver
+			v = nil
+		}
+		rm[pident{n: d.n, v: v}] = lm
+
+		for _, dep := range d.deps {
+			lm[n] = append(lm[n], string(dep.Ident.ProjectRoot))
+		}
+
+		// first is root
+		if k == 0 {
+			for _, dep := range d.devdeps {
+				lm[n] = append(lm[n], string(dep.Ident.ProjectRoot))
+			}
+		}
+	}
+
+	return rm
+}
+
+type pident struct {
+	n ProjectRoot
+	v Version
+}
+
+type specfix interface {
+	name() string
+	specs() []depspec
+	maxTries() int
+	expectErrs() []string
+	solution() map[string]Version
+}
+
+// A basicFixture is a declarative test fixture that can cover a wide variety of
+// solver cases. All cases, however, maintain one invariant: package == project.
+// There are no subpackages, and so it is impossible for them to trigger or
+// require bimodal solving.
+//
+// This type is separate from bimodalFixture in part for legacy reasons - many
+// of these were adapted from similar tests in dart's pub lib, where there is no
+// such thing as "bimodal solving".
+//
+// But it's also useful to keep them separate because bimodal solving involves
+// considerably more complexity than simple solving, both in terms of fixture
+// declaration and actual solving mechanics. Thus, we gain a lot of value for
+// contributors and maintainers by keeping comprehension costs relatively low
+// while still covering important cases.
+type basicFixture struct {
+	// name of this fixture datum
+	n string
+	// depspecs. always treat first as root
+	ds []depspec
+	// results; map of name/version pairs
+	r map[string]Version
+	// max attempts the solver should need to find solution. 0 means no limit
+	maxAttempts int
+	// Use downgrade instead of default upgrade sorter
+	downgrade bool
+	// lock file simulator, if one's to be used at all
+	l fixLock
+	// projects expected to have errors, if any
+	errp []string
+	// request up/downgrade to all projects
+	changeall bool
+}
+
+func (f basicFixture) name() string {
+	return f.n
+}
+
+func (f basicFixture) specs() []depspec {
+	return f.ds
+}
+
+func (f basicFixture) maxTries() int {
+	return f.maxAttempts
+}
+
+func (f basicFixture) expectErrs() []string {
+	return f.errp
+}
+
+func (f basicFixture) solution() map[string]Version {
+	return f.r
+}
+
+// A table of basicFixtures, used in the basic solving test set.
+var basicFixtures = map[string]basicFixture{
+	// basic fixtures
+	"no dependencies": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0"),
+		},
+		r: mksolution(),
+	},
+	"simple dependency tree": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 1.0.0", "aa 1.0.0", "ab 1.0.0"),
+			mkDepspec("aa 1.0.0"),
+			mkDepspec("ab 1.0.0"),
+			mkDepspec("b 1.0.0", "ba 1.0.0", "bb 1.0.0"),
+			mkDepspec("ba 1.0.0"),
+			mkDepspec("bb 1.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"aa 1.0.0",
+			"ab 1.0.0",
+			"b 1.0.0",
+			"ba 1.0.0",
+			"bb 1.0.0",
+		),
+	},
+	"shared dependency with overlapping constraints": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 1.0.0", "shared >=2.0.0, <4.0.0"),
+			mkDepspec("b 1.0.0", "shared >=3.0.0, <5.0.0"),
+			mkDepspec("shared 2.0.0"),
+			mkDepspec("shared 3.0.0"),
+			mkDepspec("shared 3.6.9"),
+			mkDepspec("shared 4.0.0"),
+			mkDepspec("shared 5.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+			"shared 3.6.9",
+		),
+	},
+	"downgrade on overlapping constraints": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 1.0.0", "shared >=2.0.0, <=4.0.0"),
+			mkDepspec("b 1.0.0", "shared >=3.0.0, <5.0.0"),
+			mkDepspec("shared 2.0.0"),
+			mkDepspec("shared 3.0.0"),
+			mkDepspec("shared 3.6.9"),
+			mkDepspec("shared 4.0.0"),
+			mkDepspec("shared 5.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+			"shared 3.0.0",
+		),
+		downgrade: true,
+	},
+	"shared dependency where dependent version in turn affects other dependencies": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo <=1.0.2", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 1.0.1", "bang 1.0.0"),
+			mkDepspec("foo 1.0.2", "whoop 1.0.0"),
+			mkDepspec("foo 1.0.3", "zoop 1.0.0"),
+			mkDepspec("bar 1.0.0", "foo <=1.0.1"),
+			mkDepspec("bang 1.0.0"),
+			mkDepspec("whoop 1.0.0"),
+			mkDepspec("zoop 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.1",
+			"bar 1.0.0",
+			"bang 1.0.0",
+		),
+	},
+	"removed dependency": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "foo 1.0.0", "bar *"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 2.0.0"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 2.0.0", "baz 1.0.0"),
+			mkDepspec("baz 1.0.0", "foo 2.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+		maxAttempts: 2,
+	},
+	"with mismatched net addrs": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0", "bar from baz 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		// TODO(sdboyer) ugh; do real error comparison instead of shitty abstraction
+		errp: []string{"foo", "foo", "root"},
+	},
+	// fixtures with locks
+	"with compatible locked dependency": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.1",
+			"bar 1.0.1",
+		),
+	},
+	"upgrade through lock": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+		),
+		changeall: true,
+	},
+	"downgrade through lock": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+		changeall: true,
+		downgrade: true,
+	},
+	"with incompatible locked dependency": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo >1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+		),
+	},
+	"with unrelated locked dependency": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+			mkDepspec("baz 1.0.0 bazrev"),
+		},
+		l: mklock(
+			"baz 1.0.0 bazrev",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+		),
+	},
+	"unlocks dependencies if necessary to ensure that a new dependency is satisfied": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "newdep *"),
+			mkDepspec("foo 1.0.0 foorev", "bar <2.0.0"),
+			mkDepspec("bar 1.0.0 barrev", "baz <2.0.0"),
+			mkDepspec("baz 1.0.0 bazrev", "qux <2.0.0"),
+			mkDepspec("qux 1.0.0 quxrev"),
+			mkDepspec("foo 2.0.0", "bar <3.0.0"),
+			mkDepspec("bar 2.0.0", "baz <3.0.0"),
+			mkDepspec("baz 2.0.0", "qux <3.0.0"),
+			mkDepspec("qux 2.0.0"),
+			mkDepspec("newdep 2.0.0", "baz >=1.5.0"),
+		},
+		l: mklock(
+			"foo 1.0.0 foorev",
+			"bar 1.0.0 barrev",
+			"baz 1.0.0 bazrev",
+			"qux 1.0.0 quxrev",
+		),
+		r: mksolution(
+			"foo 2.0.0",
+			"bar 2.0.0",
+			"baz 2.0.0",
+			"qux 1.0.0 quxrev",
+			"newdep 2.0.0",
+		),
+		maxAttempts: 4,
+	},
+	"locked atoms are matched on both local and net name": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0 foorev"),
+			mkDepspec("foo 2.0.0 foorev2"),
+		},
+		l: mklock(
+			"foo from baz 1.0.0 foorev",
+		),
+		r: mksolution(
+			"foo 2.0.0 foorev2",
+		),
+	},
+	"pairs bare revs in lock with versions": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo ~1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mkrevlock(
+			"foo 1.0.1 foorev", // mkrevlock drops the 1.0.1
+		),
+		r: mksolution(
+			"foo 1.0.1 foorev",
+			"bar 1.0.1",
+		),
+	},
+	"pairs bare revs in lock with all versions": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo ~1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2 foorev", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mkrevlock(
+			"foo 1.0.1 foorev", // mkrevlock drops the 1.0.1
+		),
+		r: mksolution(
+			"foo 1.0.2 foorev",
+			"bar 1.0.1",
+		),
+	},
+	"does not pair bare revs in manifest with unpaired lock version": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo ~1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mkrevlock(
+			"foo 1.0.1 foorev", // mkrevlock drops the 1.0.1
+		),
+		r: mksolution(
+			"foo 1.0.1 foorev",
+			"bar 1.0.1",
+		),
+	},
+	"includes root package's dev dependencies": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "(dev) foo 1.0.0", "(dev) bar 1.0.0"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+	},
+	"includes dev dependency's transitive dependencies": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "(dev) foo 1.0.0"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+	},
+	"ignores transitive dependency's dev dependencies": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "(dev) foo 1.0.0"),
+			mkDepspec("foo 1.0.0", "(dev) bar 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+		),
+	},
+	"no version that matches requirement": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo >=1.0.0, <2.0.0"),
+			mkDepspec("foo 2.0.0"),
+			mkDepspec("foo 2.1.3"),
+		},
+		errp: []string{"foo", "root"},
+	},
+	"no version that matches combined constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0", "shared >=2.0.0, <3.0.0"),
+			mkDepspec("bar 1.0.0", "shared >=2.9.0, <4.0.0"),
+			mkDepspec("shared 2.5.0"),
+			mkDepspec("shared 3.5.0"),
+		},
+		errp: []string{"shared", "foo", "bar"},
+	},
+	"disjoint constraints": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0", "shared <=2.0.0"),
+			mkDepspec("bar 1.0.0", "shared >3.0.0"),
+			mkDepspec("shared 2.0.0"),
+			mkDepspec("shared 4.0.0"),
+		},
+		//errp: []string{"shared", "foo", "bar"}, // dart's has this...
+		errp: []string{"foo", "bar"},
+	},
+	"no valid solution": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 2.0.0", "b 2.0.0"),
+			mkDepspec("b 1.0.0", "a 2.0.0"),
+			mkDepspec("b 2.0.0", "a 1.0.0"),
+		},
+		errp:        []string{"b", "a"},
+		maxAttempts: 2,
+	},
+	"no version that matches while backtracking": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b >1.0.0"),
+			mkDepspec("a 1.0.0"),
+			mkDepspec("b 1.0.0"),
+		},
+		errp: []string{"b", "root"},
+	},
+	// The latest versions of a and b disagree on c. An older version of either
+	// will resolve the problem. This test validates that b, which is farther
+	// in the dependency graph from myapp, is downgraded first.
+	"rolls back leaf versions first": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *"),
+			mkDepspec("a 1.0.0", "b *"),
+			mkDepspec("a 2.0.0", "b *", "c 2.0.0"),
+			mkDepspec("b 1.0.0"),
+			mkDepspec("b 2.0.0", "c 1.0.0"),
+			mkDepspec("c 1.0.0"),
+			mkDepspec("c 2.0.0"),
+		},
+		r: mksolution(
+			"a 2.0.0",
+			"b 1.0.0",
+			"c 2.0.0",
+		),
+		maxAttempts: 2,
+	},
+	// Only one version of baz, so foo and bar will have to downgrade until they
+	// reach it.
+	"mutual downgrading": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 2.0.0", "bar 2.0.0"),
+			mkDepspec("foo 3.0.0", "bar 3.0.0"),
+			mkDepspec("bar 1.0.0", "baz *"),
+			mkDepspec("bar 2.0.0", "baz 2.0.0"),
+			mkDepspec("bar 3.0.0", "baz 3.0.0"),
+			mkDepspec("baz 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+			"baz 1.0.0",
+		),
+		maxAttempts: 3,
+	},
+	// Ensures the solver doesn't exhaustively search all versions of b when
+	// it's a-2.0.0 whose dependency on c-2.0.0-nonexistent led to the
+	// problem. We make sure b has more versions than a so that the solver
+	// tries a first since it sorts sibling dependencies by number of
+	// versions.
+	"search real failer": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "c 1.0.0"),
+			mkDepspec("a 2.0.0", "c 2.0.0"),
+			mkDepspec("b 1.0.0"),
+			mkDepspec("b 2.0.0"),
+			mkDepspec("b 3.0.0"),
+			mkDepspec("c 1.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 3.0.0",
+			"c 1.0.0",
+		),
+		maxAttempts: 2,
+	},
+	// Dependencies are ordered so that packages with fewer versions are tried
+	// first. Here, there are two valid solutions (either a or b must be
+	// downgraded once). The chosen one depends on which dep is traversed first.
+	// Since b has fewer versions, it will be traversed first, which means a
+	// will come later. Since later selections are revised first, a gets
+	// downgraded.
+	"traverse into package with fewer versions first": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "c *"),
+			mkDepspec("a 2.0.0", "c *"),
+			mkDepspec("a 3.0.0", "c *"),
+			mkDepspec("a 4.0.0", "c *"),
+			mkDepspec("a 5.0.0", "c 1.0.0"),
+			mkDepspec("b 1.0.0", "c *"),
+			mkDepspec("b 2.0.0", "c *"),
+			mkDepspec("b 3.0.0", "c *"),
+			mkDepspec("b 4.0.0", "c 2.0.0"),
+			mkDepspec("c 1.0.0"),
+			mkDepspec("c 2.0.0"),
+		},
+		r: mksolution(
+			"a 4.0.0",
+			"b 4.0.0",
+			"c 2.0.0",
+		),
+		maxAttempts: 2,
+	},
+	// This is similar to the preceding fixture. When getting the number of
+	// versions of a package to determine which to traverse first, versions that
+	// are disallowed by the root package's constraints should not be
+	// considered. Here, foo has more versions than bar in total (4), but fewer
+	// that meet myapp's constraints (only 2). There is no solution, but we will
+	// do less backtracking if foo is tested first.
+	"root constraints pre-eliminate versions": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *"),
+			mkDepspec("foo 1.0.0", "none 2.0.0"),
+			mkDepspec("foo 2.0.0", "none 2.0.0"),
+			mkDepspec("foo 3.0.0", "none 2.0.0"),
+			mkDepspec("foo 4.0.0", "none 2.0.0"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 2.0.0"),
+			mkDepspec("bar 3.0.0"),
+			mkDepspec("none 1.0.0"),
+		},
+		errp:        []string{"none", "foo"},
+		maxAttempts: 1,
+	},
+	// If there"s a disjoint constraint on a package, then selecting other
+	// versions of it is a waste of time: no possible versions can match. We
+	// need to jump past it to the most recent package that affected the
+	// constraint.
+	"backjump past failed package on disjoint constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "foo *"),
+			mkDepspec("a 1.0.0", "foo *"),
+			mkDepspec("a 2.0.0", "foo <1.0.0"),
+			mkDepspec("foo 2.0.0"),
+			mkDepspec("foo 2.0.1"),
+			mkDepspec("foo 2.0.2"),
+			mkDepspec("foo 2.0.3"),
+			mkDepspec("foo 2.0.4"),
+			mkDepspec("none 1.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"foo 2.0.4",
+		),
+		maxAttempts: 2,
+	},
+	// A revision enters the vqueue if a dep has a constraint on that revision.
+	"revision injected into vqueue": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo r123abc"),
+			mkDepspec("foo r123abc"),
+			mkDepspec("foo 1.0.0 foorev"),
+			mkDepspec("foo 2.0.0 foorev2"),
+		},
+		r: mksolution(
+			"foo r123abc",
+		),
+	},
+	// TODO(sdboyer) decide how to refactor the solver in order to re-enable these.
+	// Checking for revision existence is important...but kinda obnoxious.
+	//{
+	//// Solve fails if revision constraint calls for a nonexistent revision
+	//n: "fail on missing revision",
+	//ds: []depspec{
+	//mkDepspec("root 0.0.0", "bar *"),
+	//mkDepspec("bar 1.0.0", "foo r123abc"),
+	//mkDepspec("foo r123nomatch"),
+	//mkDepspec("foo 1.0.0"),
+	//mkDepspec("foo 2.0.0"),
+	//},
+	//errp: []string{"bar", "foo", "bar"},
+	//},
+	//{
+	//// Solve fails if revision constraint calls for a nonexistent revision,
+	//// even if rev constraint is specified by root
+	//n: "fail on missing revision from root",
+	//ds: []depspec{
+	//mkDepspec("root 0.0.0", "foo r123nomatch"),
+	//mkDepspec("foo r123abc"),
+	//mkDepspec("foo 1.0.0"),
+	//mkDepspec("foo 2.0.0"),
+	//},
+	//errp: []string{"foo", "root", "foo"},
+	//},
+
+	// TODO(sdboyer) add fixture that tests proper handling of loops via aliases (where
+	// a project that wouldn't be a loop is aliased to a project that is a loop)
+}
+
+func init() {
+	// This sets up a hundred versions of foo and bar, 0.0.0 through 9.9.0. Each
+	// version of foo depends on a baz with the same major version. Each version
+	// of bar depends on a baz with the same minor version. There is only one
+	// version of baz, 0.0.0, so only older versions of foo and bar will
+	// satisfy it.
+	fix := basicFixture{
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *"),
+			mkDepspec("baz 0.0.0"),
+		},
+		r: mksolution(
+			"foo 0.9.0",
+			"bar 9.0.0",
+			"baz 0.0.0",
+		),
+		maxAttempts: 10,
+	}
+
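+	// For example, the pass with i=3, j=4 adds mkDepspec("foo 3.4.0", "baz 3.0.0")
+	// and mkDepspec("bar 3.4.0", "baz 0.4.0") via the loop below.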
+	for i := 0; i < 10; i++ {
+		for j := 0; j < 10; j++ {
+			fix.ds = append(fix.ds, mkDepspec(fmt.Sprintf("foo %v.%v.0", i, j), fmt.Sprintf("baz %v.0.0", i)))
+			fix.ds = append(fix.ds, mkDepspec(fmt.Sprintf("bar %v.%v.0", i, j), fmt.Sprintf("baz 0.%v.0", j)))
+		}
+	}
+
+	basicFixtures["complex backtrack"] = fix
+
+	for k, fix := range basicFixtures {
+		// Assign the name into the fixture itself
+		fix.n = k
+		basicFixtures[k] = fix
+	}
+}
+
+// A reachMap contains externalReach()-type data for a given depspec fixture's
+// universe of projects, packages, and versions.
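+//
+// For example (illustrative only): a fixture whose depspec "a 1.0.0" has a
+// root package "a" importing "b" and "c" yields an entry keyed by
+// pident{a, 1.0.0} that maps "a" to []string{"b", "c"}.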
+type reachMap map[pident]map[string][]string
+
+type depspecSourceManager struct {
+	specs []depspec
+	rm    reachMap
+	ig    map[string]bool
+}
+
+type fixSM interface {
+	SourceManager
+	rootSpec() depspec
+	allSpecs() []depspec
+	ignore() map[string]bool
+}
+
+var _ fixSM = &depspecSourceManager{}
+
+func newdepspecSM(ds []depspec, ignore []string) *depspecSourceManager {
+	ig := make(map[string]bool)
+	if len(ignore) > 0 {
+		for _, pkg := range ignore {
+			ig[pkg] = true
+		}
+	}
+
+	return &depspecSourceManager{
+		specs: ds,
+		rm:    computeBasicReachMap(ds),
+		ig:    ig,
+	}
+}
+
+func (sm *depspecSourceManager) GetProjectInfo(n ProjectRoot, v Version) (Manifest, Lock, error) {
+	for _, ds := range sm.specs {
+		if n == ds.n && v.Matches(ds.v) {
+			return ds, dummyLock{}, nil
+		}
+	}
+
+	// TODO(sdboyer) proper solver-type errors
+	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", n, v)
+}
+
+func (sm *depspecSourceManager) ExternalReach(n ProjectRoot, v Version) (map[string][]string, error) {
+	id := pident{n: n, v: v}
+	if m, exists := sm.rm[id]; exists {
+		return m, nil
+	}
+	return nil, fmt.Errorf("No reach data for %s at version %s", n, v)
+}
+
+func (sm *depspecSourceManager) ListExternal(n ProjectRoot, v Version) ([]string, error) {
+	// This should only be called for the root
+	id := pident{n: n, v: v}
+	if r, exists := sm.rm[id]; exists {
+		return r[string(n)], nil
+	}
+	return nil, fmt.Errorf("No reach data for %s at version %s", n, v)
+}
+
+func (sm *depspecSourceManager) ListPackages(n ProjectRoot, v Version) (PackageTree, error) {
+	id := pident{n: n, v: v}
+	if r, exists := sm.rm[id]; exists {
+		ptree := PackageTree{
+			ImportRoot: string(n),
+			Packages: map[string]PackageOrErr{
+				string(n): {
+					P: Package{
+						ImportPath: string(n),
+						Name:       string(n),
+						Imports:    r[string(n)],
+					},
+				},
+			},
+		}
+		return ptree, nil
+	}
+
+	return PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", n, v)
+}
+
+func (sm *depspecSourceManager) ListVersions(name ProjectRoot) (pi []Version, err error) {
+	for _, ds := range sm.specs {
+		// To simulate the behavior of the real SourceManager, we do not return
+		// revisions from ListVersions().
+		if _, isrev := ds.v.(Revision); !isrev && name == ds.n {
+			pi = append(pi, ds.v)
+		}
+	}
+
+	if len(pi) == 0 {
+		err = fmt.Errorf("Project %s could not be found", name)
+	}
+
+	return
+}
+
+func (sm *depspecSourceManager) RevisionPresentIn(name ProjectRoot, r Revision) (bool, error) {
+	for _, ds := range sm.specs {
+		if name == ds.n && r == ds.v {
+			return true, nil
+		}
+	}
+
+	return false, fmt.Errorf("Project %s has no revision %s", name, r)
+}
+
+func (sm *depspecSourceManager) RepoExists(name ProjectRoot) (bool, error) {
+	for _, ds := range sm.specs {
+		if name == ds.n {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (sm *depspecSourceManager) VendorCodeExists(name ProjectRoot) (bool, error) {
+	return false, nil
+}
+
+func (sm *depspecSourceManager) Release() {}
+
+func (sm *depspecSourceManager) ExportProject(n ProjectRoot, v Version, to string) error {
+	return fmt.Errorf("dummy sm doesn't support exporting")
+}
+
+func (sm *depspecSourceManager) rootSpec() depspec {
+	return sm.specs[0]
+}
+
+func (sm *depspecSourceManager) allSpecs() []depspec {
+	return sm.specs
+}
+
+func (sm *depspecSourceManager) ignore() map[string]bool {
+	return sm.ig
+}
+
+type depspecBridge struct {
+	*bridge
+}
+
+// override computeRootReach() on bridge to read directly out of the depspecs
+func (b *depspecBridge) computeRootReach() ([]string, error) {
+	// This only gets called for the root project, so grab that one off the test
+	// source manager
+	dsm := b.sm.(fixSM)
+	root := dsm.rootSpec()
+
+	ptree, err := dsm.ListPackages(root.n, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return ptree.ListExternalImports(true, true, dsm.ignore()), nil
+}
+
+// override verifyRootDir() on bridge to prevent any filesystem interaction
+func (b *depspecBridge) verifyRootDir(path string) error {
+	root := b.sm.(fixSM).rootSpec()
+	if string(root.n) != path {
+		return fmt.Errorf("Expected only root project %q to computeRootReach(), got %q", root.n, path)
+	}
+
+	return nil
+}
+
+func (b *depspecBridge) listPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	return b.sm.(fixSM).ListPackages(b.key(id), v)
+}
+
+// override deduceRemoteRepo on bridge to make all our pkg/project mappings work
+// as expected
+func (b *depspecBridge) deduceRemoteRepo(path string) (*remoteRepo, error) {
+	for _, ds := range b.sm.(fixSM).allSpecs() {
+		n := string(ds.n)
+		if path == n || strings.HasPrefix(path, n+"/") {
+			return &remoteRepo{
+				Base:   n,
+				RelPkg: strings.TrimPrefix(path, n+"/"),
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("Could not find %s, or any parent, in list of known fixtures", path)
+}
+
+// enforce interfaces
+var _ Manifest = depspec{}
+var _ Lock = dummyLock{}
+var _ Lock = fixLock{}
+
+// impl Manifest interface
+func (ds depspec) DependencyConstraints() []ProjectConstraint {
+	return ds.deps
+}
+
+// impl Manifest interface
+func (ds depspec) TestDependencyConstraints() []ProjectConstraint {
+	return ds.devdeps
+}
+
+type fixLock []LockedProject
+
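+// impl Lock interface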
+func (fixLock) SolverVersion() string {
+	return "-1"
+}
+
+// impl Lock interface
+func (fixLock) InputHash() []byte {
+	return []byte("fooooorooooofooorooofoo")
+}
+
+// impl Lock interface
+func (l fixLock) Projects() []LockedProject {
+	return l
+}
+
+type dummyLock struct{}
+
+// impl Lock interface
+func (dummyLock) SolverVersion() string {
+	return "-1"
+}
+
+// impl Lock interface
+func (dummyLock) InputHash() []byte {
+	return []byte("fooooorooooofooorooofoo")
+}
+
+// impl Lock interface
+func (dummyLock) Projects() []LockedProject {
+	return nil
+}
+
+// We've borrowed this bestiary from pub's tests:
+// https://github.com/dart-lang/pub/blob/master/test/version_solver_test.dart
+
+// TODO(sdboyer) finish converting all of these
+
+/*
+func basicGraph() {
+  testResolve("circular dependency", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "bar": "1.0.0"
+    },
+    "bar 1.0.0": {
+      "foo": "1.0.0"
+    }
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0",
+    "bar": "1.0.0"
+  });
+
+}
+
+func withLockFile() {
+
+}
+
+func rootDependency() {
+  testResolve("with root source", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "myapp from root": ">=1.0.0"
+    }
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0"
+  });
+
+  testResolve("with different source", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "myapp": ">=1.0.0"
+    }
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0"
+  });
+
+  testResolve("with wrong version", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "myapp": "<1.0.0"
+    }
+  }, error: couldNotSolve);
+}
+
+func unsolvable() {
+
+  testResolve("mismatched descriptions", {
+    "myapp 0.0.0": {
+      "foo": "1.0.0",
+      "bar": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "shared-x": "1.0.0"
+    },
+    "bar 1.0.0": {
+      "shared-y": "1.0.0"
+    },
+    "shared-x 1.0.0": {},
+    "shared-y 1.0.0": {}
+  }, error: descriptionMismatch("shared", "foo", "bar"));
+
+  testResolve("mismatched sources", {
+    "myapp 0.0.0": {
+      "foo": "1.0.0",
+      "bar": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "shared": "1.0.0"
+    },
+    "bar 1.0.0": {
+      "shared from mock2": "1.0.0"
+    },
+    "shared 1.0.0": {},
+    "shared 1.0.0 from mock2": {}
+  }, error: sourceMismatch("shared", "foo", "bar"));
+
+
+
+  // This is a regression test for #18300.
+  testResolve("...", {
+    "myapp 0.0.0": {
+      "angular": "any",
+      "collection": "any"
+    },
+    "analyzer 0.12.2": {},
+    "angular 0.10.0": {
+      "di": ">=0.0.32 <0.1.0",
+      "collection": ">=0.9.1 <1.0.0"
+    },
+    "angular 0.9.11": {
+      "di": ">=0.0.32 <0.1.0",
+      "collection": ">=0.9.1 <1.0.0"
+    },
+    "angular 0.9.10": {
+      "di": ">=0.0.32 <0.1.0",
+      "collection": ">=0.9.1 <1.0.0"
+    },
+    "collection 0.9.0": {},
+    "collection 0.9.1": {},
+    "di 0.0.37": {"analyzer": ">=0.13.0 <0.14.0"},
+    "di 0.0.36": {"analyzer": ">=0.13.0 <0.14.0"}
+  }, error: noVersion(["analyzer", "di"]), maxTries: 2);
+}
+
+func badSource() {
+  testResolve("fail if the root package has a bad source in dep", {
+    "myapp 0.0.0": {
+      "foo from bad": "any"
+    },
+  }, error: unknownSource("myapp", "foo", "bad"));
+
+  testResolve("fail if the root package has a bad source in dev dep", {
+    "myapp 0.0.0": {
+      "(dev) foo from bad": "any"
+    },
+  }, error: unknownSource("myapp", "foo", "bad"));
+
+  testResolve("fail if all versions have bad source in dep", {
+    "myapp 0.0.0": {
+      "foo": "any"
+    },
+    "foo 1.0.0": {
+      "bar from bad": "any"
+    },
+    "foo 1.0.1": {
+      "baz from bad": "any"
+    },
+    "foo 1.0.3": {
+      "bang from bad": "any"
+    },
+  }, error: unknownSource("foo", "bar", "bad"), maxTries: 3);
+
+  testResolve("ignore versions with bad source in dep", {
+    "myapp 1.0.0": {
+      "foo": "any"
+    },
+    "foo 1.0.0": {
+      "bar": "any"
+    },
+    "foo 1.0.1": {
+      "bar from bad": "any"
+    },
+    "foo 1.0.3": {
+      "bar from bad": "any"
+    },
+    "bar 1.0.0": {}
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0",
+    "bar": "1.0.0"
+  }, maxTries: 3);
+}
+
+func backtracking() {
+  testResolve("circular dependency on older version", {
+    "myapp 0.0.0": {
+      "a": ">=1.0.0"
+    },
+    "a 1.0.0": {},
+    "a 2.0.0": {
+      "b": "1.0.0"
+    },
+    "b 1.0.0": {
+      "a": "1.0.0"
+    }
+  }, result: {
+    "myapp from root": "0.0.0",
+    "a": "1.0.0"
+  }, maxTries: 2);
+}
+*/
diff --git a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
new file mode 100644
index 0000000..09333e0
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
@@ -0,0 +1,643 @@
+package gps
+
+import (
+	"fmt"
+	"path/filepath"
+)
+
+// dsp - "depspec with packages"
+//
+// Wraps a set of tpkgs onto a depspec, and returns it.
+func dsp(ds depspec, pkgs ...tpkg) depspec {
+	ds.pkgs = pkgs
+	return ds
+}
+
+// pkg makes a tpkg appropriate for use in bimodal testing
+func pkg(path string, imports ...string) tpkg {
+	return tpkg{
+		path:    path,
+		imports: imports,
+	}
+}
+
+func init() {
+	for k, fix := range bimodalFixtures {
+		// Assign the name into the fixture itself
+		fix.n = k
+		bimodalFixtures[k] = fix
+	}
+}
+
+// Fixtures that rely on simulated bimodal (project and package-level)
+// analysis for correct operation. The name given in the map gets assigned into
+// the fixture itself in init().
+var bimodalFixtures = map[string]bimodalFixture{
+	// Simple case, ensures that we do the very basics of picking up and
+	// including a single, simple import that is not expressed as a constraint
+	"simple bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a")),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Ensure it works when the import jump is not from the package with the
+	// same path as root, but from a subpkg
+	"subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// The same, but with a jump through two subpkgs
+	"double-subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "root/bar"),
+				pkg("root/bar", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Same again, but now nest the subpkgs
+	"double nested subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "root/foo/bar"),
+				pkg("root/foo/bar", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Importing package from project with no root package
+	"bm-add on project with no pkg in root dir": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a/foo")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a/foo")),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Import jump is in a dep, and points to a transitive dep
+	"transitive bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Constraints apply only if the project that declares them has a
+	// reachable import
+	"constraints activated by import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "b 1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+			dsp(mkDepspec("b 1.1.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.1.0",
+		),
+	},
+	// Import jump is in a dep, and points to a transitive dep - but only in a
+	// version that is not the first one we try
+	"transitive bm-add on older version": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a ~1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+			),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Import jump is in a dep, and points to a transitive dep - but will only
+	// get there via backtracking
+	"backtrack to dep on bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a", "b"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "c"),
+			),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a"),
+			),
+			// Include two versions of b, otherwise it'll be selected first
+			dsp(mkDepspec("b 0.9.0"),
+				pkg("b", "c"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b", "c"),
+			),
+			dsp(mkDepspec("c 1.0.0", "a 1.0.0"),
+				pkg("c", "a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+			"c 1.0.0",
+		),
+	},
+	// Import jump is in a dep subpkg, and points to a transitive dep
+	"transitive subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Import jump is in a dep subpkg, pointing to a transitive dep, but only
+	// in a version that is not the first one we try
+	"transitive subpkg bm-add on older version": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a ~1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar", "b"),
+			),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Ensure that if a constraint is expressed, but no actual import exists,
+	// then the constraint is disregarded - the project named in the constraint
+	// is not part of the solution.
+	"ignore constraint without import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a 1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(),
+	},
+	// Transitive deps from one project (a) get incrementally included as other
+	// deps incorporate its various packages.
+	"multi-stage pkg incorporation": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "d"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+				pkg("a/second", "c"),
+			),
+			dsp(mkDepspec("b 2.0.0"),
+				pkg("b"),
+			),
+			dsp(mkDepspec("c 1.2.0"),
+				pkg("c"),
+			),
+			dsp(mkDepspec("d 1.0.0"),
+				pkg("d", "a/second"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 2.0.0",
+			"c 1.2.0",
+			"d 1.0.0",
+		),
+	},
+	// Regression - make sure that the constraint/import intersector only
+	// accepts a project 'match' if exactly equal, or a separating slash is
+	// present.
+	"radix path separator post-check": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "foo", "foobar"),
+			),
+			dsp(mkDepspec("foo 1.0.0"),
+				pkg("foo"),
+			),
+			dsp(mkDepspec("foobar 1.0.0"),
+				pkg("foobar"),
+			),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"foobar 1.0.0",
+		),
+	},
+	// Well-formed failure when there's a dependency on a pkg that doesn't exist
+	"fail when imports nonexistent package": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a 1.0.0"),
+				pkg("root", "a/foo"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		errp: []string{"a", "root", "a"},
+	},
+	// Transitive deps from one project (a) get incrementally included as other
+	// deps incorporate its various packages, and we fail with a proper error
+	// when a package discovered incrementally turns out not to be present
+	"fail multi-stage missing pkg": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "d"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+				pkg("a/second", "c"),
+			),
+			dsp(mkDepspec("b 2.0.0"),
+				pkg("b"),
+			),
+			dsp(mkDepspec("c 1.2.0"),
+				pkg("c"),
+			),
+			dsp(mkDepspec("d 1.0.0"),
+				pkg("d", "a/second"),
+				pkg("d", "a/nonexistent"),
+			),
+		},
+		errp: []string{"d", "a", "d"},
+	},
+	// Check ignores on the root project
+	"ignore in double-subpkg": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "root/bar", "b"),
+				pkg("root/bar", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		ignore: []string{"root/bar"},
+		r: mksolution(
+			"b 1.0.0",
+		),
+	},
+	// Ignores on a dep pkg
+	"ignore through dep pkg": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		ignore: []string{"a/bar"},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Preferred version, as derived from a dep's lock, is attempted first
+	"respect prefv, simple case": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b")),
+			dsp(mkDepspec("b 1.0.0 foorev"),
+				pkg("b")),
+			dsp(mkDepspec("b 2.0.0 barrev"),
+				pkg("b")),
+		},
+		lm: map[string]fixLock{
+			"a 1.0.0": mklock(
+				"b 1.0.0 foorev",
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0 foorev",
+		),
+	},
+	// Preferred version, as derived from a dep's lock, is attempted first, even
+	// if the root also has a direct dep on it (root doesn't need to use
+	// preferreds, because it has direct control AND because the root lock
+	// already supersedes dep lock "preferences")
+	"respect dep prefv with root import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "b")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b")),
+			//dsp(newDepspec("a 1.0.1"),
+			//pkg("a", "b")),
+			//dsp(newDepspec("a 1.1.0"),
+			//pkg("a", "b")),
+			dsp(mkDepspec("b 1.0.0 foorev"),
+				pkg("b")),
+			dsp(mkDepspec("b 2.0.0 barrev"),
+				pkg("b")),
+		},
+		lm: map[string]fixLock{
+			"a 1.0.0": mklock(
+				"b 1.0.0 foorev",
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0 foorev",
+		),
+	},
+	// Preferred versions can only work if the project offering them has been
+	// selected, or at least marked in the unselected queue
+	"prefv only works if depper is selected": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "b")),
+			// Three atoms for a, which will mean it gets visited after b
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b")),
+			dsp(mkDepspec("a 1.0.1"),
+				pkg("a", "b")),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a", "b")),
+			dsp(mkDepspec("b 1.0.0 foorev"),
+				pkg("b")),
+			dsp(mkDepspec("b 2.0.0 barrev"),
+				pkg("b")),
+		},
+		lm: map[string]fixLock{
+			"a 1.0.0": mklock(
+				"b 1.0.0 foorev",
+			),
+		},
+		r: mksolution(
+			"a 1.1.0",
+			"b 2.0.0 barrev",
+		),
+	},
+}
+
+// tpkg is a representation of a single package. It has its own import path, as
+// well as a list of paths it itself "imports".
+type tpkg struct {
+	// Full import path of this package
+	path string
+	// Slice of full paths to its virtual imports
+	imports []string
+}
+
+type bimodalFixture struct {
+	// name of this fixture datum
+	n string
+	// bimodal projects; the first is always treated as the root project
+	ds []depspec
+	// results; map of name/version pairs
+	r map[string]Version
+	// max attempts the solver should need to find solution. 0 means no limit
+	maxAttempts int
+	// Use downgrade instead of default upgrade sorter
+	downgrade bool
+	// lock file simulator, if one's to be used at all
+	l fixLock
+	// map of locks for deps, if any. keys should be of the form:
+	// "<project> <version>"
+	lm map[string]fixLock
+	// projects expected to have errors, if any
+	errp []string
+	// request up/downgrade to all projects
+	changeall bool
+	// pkgs to ignore
+	ignore []string
+}
+
+func (f bimodalFixture) name() string {
+	return f.n
+}
+
+func (f bimodalFixture) specs() []depspec {
+	return f.ds
+}
+
+func (f bimodalFixture) maxTries() int {
+	return f.maxAttempts
+}
+
+func (f bimodalFixture) expectErrs() []string {
+	return f.errp
+}
+
+func (f bimodalFixture) solution() map[string]Version {
+	return f.r
+}
+
+// bmSourceManager is an SM specifically for the bimodal fixtures. It composes
+// the general depspec SM, and differs from it in how it answers static
+// analysis calls, and in its support for package ignores and dep lock data.
+type bmSourceManager struct {
+	depspecSourceManager
+	lm map[string]fixLock
+}
+
+var _ SourceManager = &bmSourceManager{}
+
+func newbmSM(bmf bimodalFixture) *bmSourceManager {
+	sm := &bmSourceManager{
+		depspecSourceManager: *newdepspecSM(bmf.ds, bmf.ignore),
+	}
+	sm.rm = computeBimodalExternalMap(bmf.ds)
+	sm.lm = bmf.lm
+
+	return sm
+}
+
+func (sm *bmSourceManager) ListPackages(n ProjectRoot, v Version) (PackageTree, error) {
+	for k, ds := range sm.specs {
+		// Cheat for root, otherwise we blow up b/c version is empty
+		if n == ds.n && (k == 0 || ds.v.Matches(v)) {
+			ptree := PackageTree{
+				ImportRoot: string(n),
+				Packages:   make(map[string]PackageOrErr),
+			}
+			for _, pkg := range ds.pkgs {
+				ptree.Packages[pkg.path] = PackageOrErr{
+					P: Package{
+						ImportPath: pkg.path,
+						Name:       filepath.Base(pkg.path),
+						Imports:    pkg.imports,
+					},
+				}
+			}
+
+			return ptree, nil
+		}
+	}
+
+	return PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", n, v)
+}
+
+func (sm *bmSourceManager) GetProjectInfo(n ProjectRoot, v Version) (Manifest, Lock, error) {
+	for _, ds := range sm.specs {
+		if n == ds.n && v.Matches(ds.v) {
+			if l, exists := sm.lm[string(n)+" "+v.String()]; exists {
+				return ds, l, nil
+			}
+			return ds, dummyLock{}, nil
+		}
+	}
+
+	// TODO(sdboyer) proper solver-type errors
+	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", n, v)
+}
+
+// computeBimodalExternalMap takes a set of depspecs and computes an
+// internally-versioned external reach map that is useful for quickly answering
+// ListExternal()-type calls.
+//
+// Note that it does not do things like stripping out stdlib packages - these
+// maps are intended for use in SM fixtures, and that's a higher-level
+// responsibility within the system.
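+//
+// Illustrative example: given a depspec "a 1.0.0" with pkg("a", "a/sub", "b")
+// and pkg("a/sub", "c"), the entry for {a 1.0.0} maps "a" to ["b", "c"] (the
+// internal import of a/sub is folded through to its external import "c") and
+// "a/sub" to ["c"].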
+func computeBimodalExternalMap(ds []depspec) map[pident]map[string][]string {
+	// map of project name+version -> map of subpkg name -> external pkg list
+	rm := make(map[pident]map[string][]string)
+
+	// algorithm adapted from externalReach()
+	for _, d := range ds {
+		// Keeps a list of all internal and external reaches for packages within
+		// a given root. We create one on each pass through, rather than doing
+		// them all at once, because the depspec set may (read: is expected to)
+		// have multiple versions of the same base project, and each of those
+		// must be calculated independently.
+		workmap := make(map[string]wm)
+
+		for _, pkg := range d.pkgs {
+			if !checkPrefixSlash(filepath.Clean(pkg.path), string(d.n)) {
+				panic(fmt.Sprintf("pkg %s is not a child of %s, cannot be a part of that project", pkg.path, d.n))
+			}
+
+			w := wm{
+				ex: make(map[string]bool),
+				in: make(map[string]bool),
+			}
+
+			for _, imp := range pkg.imports {
+				if !checkPrefixSlash(filepath.Clean(imp), string(d.n)) {
+					// Easy case - if the import is not a child of the base
+					// project path, put it in the external map
+					w.ex[imp] = true
+				} else {
+					if w2, seen := workmap[imp]; seen {
+						// If it is, and we've seen that path, dereference it
+						// immediately
+						for i := range w2.ex {
+							w.ex[i] = true
+						}
+						for i := range w2.in {
+							w.in[i] = true
+						}
+					} else {
+						// Otherwise, put it in the 'in' map for later
+						// reprocessing
+						w.in[imp] = true
+					}
+				}
+			}
+			workmap[pkg.path] = w
+		}
+
+		drm := wmToReach(workmap, "")
+		rm[pident{n: d.n, v: d.v}] = drm
+	}
+
+	return rm
+}
diff --git a/vendor/github.com/sdboyer/gps/solve_test.go b/vendor/github.com/sdboyer/gps/solve_test.go
new file mode 100644
index 0000000..95db023
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_test.go
@@ -0,0 +1,440 @@
+package gps
+
+import (
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"math/rand"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+var fixtorun string
+
+// TODO(sdboyer) regression test ensuring that locks with only revs for projects don't cause errors
+func init() {
+	flag.StringVar(&fixtorun, "gps.fix", "", "A single fixture to run in TestBasicSolves")
+	overrideMkBridge()
+}
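+
+// For example, a single fixture can be run with something like (assuming the
+// standard go tool):
+//
+//	go test -run Solves -args -gps.fix "no dependencies"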
+
+// sets the mkBridge global func to one that allows virtualized RootDirs
+func overrideMkBridge() {
+	// For all tests, override the base bridge with the depspecBridge that skips
+	// verifyRootDir calls
+	mkBridge = func(s *solver, sm SourceManager) sourceBridge {
+		return &depspecBridge{
+			&bridge{
+				sm:     sm,
+				s:      s,
+				vlists: make(map[ProjectRoot][]Version),
+			},
+		}
+	}
+}
+
+var stderrlog = log.New(os.Stderr, "", 0)
+
+func fixSolve(params SolveParameters, sm SourceManager) (Solution, error) {
+	if testing.Verbose() {
+		params.Trace = true
+		params.TraceLogger = stderrlog
+	}
+
+	s, err := Prepare(params, sm)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Solve()
+}
+
+// Test all the basic table fixtures.
+//
+// Or, just the one named in the fix arg.
+func TestBasicSolves(t *testing.T) {
+	if fixtorun != "" {
+		if fix, exists := basicFixtures[fixtorun]; exists {
+			solveBasicsAndCheck(fix, t)
+		}
+	} else {
+		// sort them by their keys so we get stable output
+		var names []string
+		for n := range basicFixtures {
+			names = append(names, n)
+		}
+
+		sort.Strings(names)
+		for _, n := range names {
+			solveBasicsAndCheck(basicFixtures[n], t)
+			if testing.Verbose() {
+				// insert a line break between tests
+				stderrlog.Println("")
+			}
+		}
+	}
+}
+
+func solveBasicsAndCheck(fix basicFixture, t *testing.T) (res Solution, err error) {
+	if testing.Verbose() {
+		stderrlog.Printf("[[fixture %q]]", fix.n)
+	}
+	sm := newdepspecSM(fix.ds, nil)
+
+	params := SolveParameters{
+		RootDir:    string(fix.ds[0].n),
+		ImportRoot: ProjectRoot(fix.ds[0].n),
+		Manifest:   fix.ds[0],
+		Lock:       dummyLock{},
+		Downgrade:  fix.downgrade,
+		ChangeAll:  fix.changeall,
+	}
+
+	if fix.l != nil {
+		params.Lock = fix.l
+	}
+
+	res, err = fixSolve(params, sm)
+
+	return fixtureSolveSimpleChecks(fix, res, err, t)
+}
+
+// Test all the bimodal table fixtures.
+//
+// Or, just the one named in the fix arg.
+func TestBimodalSolves(t *testing.T) {
+	if fixtorun != "" {
+		if fix, exists := bimodalFixtures[fixtorun]; exists {
+			solveBimodalAndCheck(fix, t)
+		}
+	} else {
+		// sort them by their keys so we get stable output
+		var names []string
+		for n := range bimodalFixtures {
+			names = append(names, n)
+		}
+
+		sort.Strings(names)
+		for _, n := range names {
+			solveBimodalAndCheck(bimodalFixtures[n], t)
+			if testing.Verbose() {
+				// insert a line break between tests
+				stderrlog.Println("")
+			}
+		}
+	}
+}
+
+func solveBimodalAndCheck(fix bimodalFixture, t *testing.T) (res Solution, err error) {
+	if testing.Verbose() {
+		stderrlog.Printf("[[fixture %q]]", fix.n)
+	}
+	sm := newbmSM(fix)
+
+	params := SolveParameters{
+		RootDir:    string(fix.ds[0].n),
+		ImportRoot: ProjectRoot(fix.ds[0].n),
+		Manifest:   fix.ds[0],
+		Lock:       dummyLock{},
+		Ignore:     fix.ignore,
+		Downgrade:  fix.downgrade,
+		ChangeAll:  fix.changeall,
+	}
+
+	if fix.l != nil {
+		params.Lock = fix.l
+	}
+
+	res, err = fixSolve(params, sm)
+
+	return fixtureSolveSimpleChecks(fix, res, err, t)
+}
+
+func fixtureSolveSimpleChecks(fix specfix, res Solution, err error, t *testing.T) (Solution, error) {
+	if err != nil {
+		errp := fix.expectErrs()
+		if len(errp) == 0 {
+			t.Errorf("(fixture: %q) Solver failed; error was type %T, text:\n%s", fix.name(), err, err)
+			return res, err
+		}
+
+		switch fail := err.(type) {
+		case *badOptsFailure:
+			t.Errorf("(fixture: %q) Unexpected bad opts failure solve error: %s", fix.name(), err)
+		case *noVersionError:
+			if errp[0] != string(fail.pn.ProjectRoot) { // TODO(sdboyer) identifierify
+				t.Errorf("(fixture: %q) Expected failure on project %s, but was on project %s", fix.name(), errp[0], fail.pn.ProjectRoot)
+			}
+
+			ep := make(map[string]struct{})
+			for _, p := range errp[1:] {
+				ep[p] = struct{}{}
+			}
+
+			found := make(map[string]struct{})
+			for _, vf := range fail.fails {
+				for _, f := range getFailureCausingProjects(vf.f) {
+					found[f] = struct{}{}
+				}
+			}
+
+			var missing []string
+			var extra []string
+			for p := range found {
+				if _, has := ep[p]; !has {
+					extra = append(extra, p)
+				}
+			}
+			if len(extra) > 0 {
+				t.Errorf("(fixture: %q) Expected solve failures due to projects %s, but solve failures also arose from %s", fix.name(), strings.Join(errp[1:], ", "), strings.Join(extra, ", "))
+			}
+
+			for p := range ep {
+				if _, has := found[p]; !has {
+					missing = append(missing, p)
+				}
+			}
+			if len(missing) > 0 {
+				t.Errorf("(fixture: %q) Expected solve failures due to projects %s, but %s had no failures", fix.name(), strings.Join(errp[1:], ", "), strings.Join(missing, ", "))
+			}
+
+		default:
+			// TODO(sdboyer) round these out
+			panic(fmt.Sprintf("unhandled solve failure type: %s", err))
+		}
+	} else if len(fix.expectErrs()) > 0 {
+		t.Errorf("(fixture: %q) Solver succeeded, but expected failure", fix.name())
+	} else {
+		r := res.(solution)
+		if fix.maxTries() > 0 && r.Attempts() > fix.maxTries() {
+			t.Errorf("(fixture: %q) Solver completed in %v attempts, but expected %v or fewer", fix.name(), r.att, fix.maxTries())
+		}
+
+		// Dump result projects into a map for easier interrogation
+		rp := make(map[string]Version)
+		for _, p := range r.p {
+			pa := p.toAtom()
+			rp[string(pa.id.ProjectRoot)] = pa.v
+		}
+
+		fixlen, rlen := len(fix.solution()), len(rp)
+		if fixlen != rlen {
+			// Different length, so they definitely disagree
+			t.Errorf("(fixture: %q) Solver reported %v package results, result expected %v", fix.name(), rlen, fixlen)
+		}
+
+		// Whether or not len is same, still have to verify that results agree
+		// Walk through fixture/expected results first
+		for p, v := range fix.solution() {
+			if av, exists := rp[p]; !exists {
+				t.Errorf("(fixture: %q) Project %q expected but missing from results", fix.name(), p)
+			} else {
+				// delete result from map so we skip it on the reverse pass
+				delete(rp, p)
+				if v != av {
+					t.Errorf("(fixture: %q) Expected version %q of project %q, but actual version was %q", fix.name(), v, p, av)
+				}
+			}
+		}
+
+		// Now walk through remaining actual results
+		for p, v := range rp {
+			if fv, exists := fix.solution()[p]; !exists {
+				t.Errorf("(fixture: %q) Unexpected project %q present in results", fix.name(), p)
+			} else if v != fv {
+				t.Errorf("(fixture: %q) Got version %q of project %q, but expected version was %q", fix.name(), v, p, fv)
+			}
+		}
+	}
+
+	return res, err
+}
+
+// This tests that, when a root lock is underspecified (has only a version), we
+// don't allow a match on that version from a rev in the manifest. We may allow
+// this in the future, but disallow it for now because going from an immutable
+// requirement to a mutable lock automagically is a bad direction that could
+// produce weird side effects.
+func TestRootLockNoVersionPairMatching(t *testing.T) {
+	fix := basicFixture{
+		n: "does not pair bare revs in manifest with unpaired lock version",
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"), // foo's constraint rewritten below to foorev
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2 foorev", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2 foorev",
+			"bar 1.0.1",
+		),
+	}
+
+	pd := fix.ds[0].deps[0]
+	pd.Constraint = Revision("foorev")
+	fix.ds[0].deps[0] = pd
+
+	sm := newdepspecSM(fix.ds, nil)
+
+	l2 := make(fixLock, 1)
+	copy(l2, fix.l)
+	l2[0].v = nil
+
+	params := SolveParameters{
+		RootDir:    string(fix.ds[0].n),
+		ImportRoot: ProjectRoot(fix.ds[0].n),
+		Manifest:   fix.ds[0],
+		Lock:       l2,
+	}
+
+	res, err := fixSolve(params, sm)
+
+	fixtureSolveSimpleChecks(fix, res, err, t)
+}
+
+func getFailureCausingProjects(err error) (projs []string) {
+	switch e := err.(type) {
+	case *noVersionError:
+		projs = append(projs, string(e.pn.ProjectRoot)) // TODO(sdboyer) identifierify
+	case *disjointConstraintFailure:
+		for _, f := range e.failsib {
+			projs = append(projs, string(f.depender.id.ProjectRoot))
+		}
+	case *versionNotAllowedFailure:
+		for _, f := range e.failparent {
+			projs = append(projs, string(f.depender.id.ProjectRoot))
+		}
+	case *constraintNotAllowedFailure:
+		// No sane way of knowing why the currently selected version is
+		// selected, so do nothing
+	case *sourceMismatchFailure:
+		projs = append(projs, string(e.prob.id.ProjectRoot))
+		for _, c := range e.sel {
+			projs = append(projs, string(c.depender.id.ProjectRoot))
+		}
+	case *checkeeHasProblemPackagesFailure:
+		projs = append(projs, string(e.goal.id.ProjectRoot))
+		for _, errdep := range e.failpkg {
+			for _, atom := range errdep.deppers {
+				projs = append(projs, string(atom.id.ProjectRoot))
+			}
+		}
+	case *depHasProblemPackagesFailure:
+		projs = append(projs, string(e.goal.depender.id.ProjectRoot), string(e.goal.dep.Ident.ProjectRoot))
+	case *nonexistentRevisionFailure:
+		projs = append(projs, string(e.goal.depender.id.ProjectRoot), string(e.goal.dep.Ident.ProjectRoot))
+	default:
+		panic(fmt.Sprintf("unknown failtype %T, msg: %s", err, err))
+	}
+
+	return
+}
+
+func TestBadSolveOpts(t *testing.T) {
+	pn := strconv.FormatInt(rand.Int63(), 36)
+	fix := basicFixtures["no dependencies"]
+	fix.ds[0].n = ProjectRoot(pn)
+
+	sm := newdepspecSM(fix.ds, nil)
+	params := SolveParameters{}
+
+	_, err := Prepare(params, nil)
+	if err == nil {
+		t.Errorf("Prepare should have errored on nil SourceManager")
+	} else if !strings.Contains(err.Error(), "non-nil SourceManager") {
+		t.Error("Prepare should have given error on nil SourceManager, but gave:", err)
+	}
+
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Prepare should have errored on empty root")
+	} else if !strings.Contains(err.Error(), "non-empty root directory") {
+		t.Error("Prepare should have given error on empty root, but gave:", err)
+	}
+
+	params.RootDir = pn
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Prepare should have errored on empty name")
+	} else if !strings.Contains(err.Error(), "non-empty import root") {
+		t.Error("Prepare should have given error on empty import root, but gave:", err)
+	}
+
+	params.ImportRoot = ProjectRoot(pn)
+	params.Trace = true
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on trace with no logger")
+	} else if !strings.Contains(err.Error(), "no logger provided") {
+		t.Error("Prepare should have given error on missing trace logger, but gave:", err)
+	}
+
+	params.TraceLogger = log.New(ioutil.Discard, "", 0)
+	_, err = Prepare(params, sm)
+	if err != nil {
+		t.Error("Basic conditions satisfied, prepare should have completed successfully, err as:", err)
+	}
+
+	// swap out the test mkBridge override temporarily, just to make sure we get
+	// the right error
+	mkBridge = func(s *solver, sm SourceManager) sourceBridge {
+		return &bridge{
+			sm:     sm,
+			s:      s,
+			vlists: make(map[ProjectRoot][]Version),
+		}
+	}
+
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on nonexistent root")
+	} else if !strings.Contains(err.Error(), "could not read project root") {
+		t.Error("Prepare should have given error nonexistent project root dir, but gave:", err)
+	}
+
+	// Pointing it at a file should also be an err
+	params.RootDir = "solve_test.go"
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on file for RootDir")
+	} else if !strings.Contains(err.Error(), "is a file, not a directory") {
+		t.Error("Prepare should have given error on file as RootDir, but gave:", err)
+	}
+
+	// swap them back...not sure if this matters, but just in case
+	overrideMkBridge()
+}
+
+func TestIgnoreDedupe(t *testing.T) {
+	fix := basicFixtures["no dependencies"]
+
+	ig := []string{"foo", "foo", "bar"}
+	params := SolveParameters{
+		RootDir:    string(fix.ds[0].n),
+		ImportRoot: ProjectRoot(fix.ds[0].n),
+		Manifest:   fix.ds[0],
+		Ignore:     ig,
+	}
+
+	s, _ := Prepare(params, newdepspecSM(basicFixtures["no dependencies"].ds, nil))
+	ts := s.(*solver)
+
+	expect := map[string]bool{
+		"foo": true,
+		"bar": true,
+	}
+
+	if !reflect.DeepEqual(ts.ig, expect) {
+		t.Errorf("Expected solver's ignore list to be deduplicated map, got %v", ts.ig)
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/solver.go b/vendor/github.com/sdboyer/gps/solver.go
new file mode 100644
index 0000000..121bc81
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solver.go
@@ -0,0 +1,1246 @@
+package gps
+
+import (
+	"container/heap"
+	"fmt"
+	"log"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/armon/go-radix"
+)
+
+// SolveParameters hold all arguments to a solver run.
+//
+// Only RootDir and ImportRoot are absolutely required. A nil Manifest is
+// allowed, though it usually makes little sense.
+//
+// Of these properties, only Manifest and Ignore are (directly) incorporated in
+// memoization hashing.
+type SolveParameters struct {
+	// The path to the root of the project on which the solver should operate.
+	// This should point to the directory that should contain the vendor/
+	// directory.
+	//
+	// In general, it is wise for this to be under an active GOPATH, though it
+	// is not (currently) required.
+	//
+	// A real path to a readable directory is required.
+	RootDir string
+
+	// The import path at the base of all import paths covered by the project.
+	// For example, the appropriate value for gps itself here is:
+	//
+	//  github.com/sdboyer/gps
+	//
+	// In most cases, this should match the latter portion of RootDir. However,
+	// that is not (currently) required.
+	//
+	// A non-empty string is required.
+	ImportRoot ProjectRoot
+
+	// The root manifest. This contains all the dependencies, constraints, and
+	// other controls available to the root project.
+	//
+	// May be nil, but for most cases, that would be unwise.
+	Manifest Manifest
+
+	// The root lock. Optional. Generally, this lock is the output of a previous
+	// solve run.
+	//
+	// If provided, the solver will attempt to preserve the versions specified
+	// in the lock, unless ToChange or ChangeAll settings indicate otherwise.
+	Lock Lock
+
+	// A list of packages (import paths) to ignore. These can be in the root
+	// project, or from elsewhere. Ignoring a package means that both it and its
+	// imports will be disregarded by all relevant solver operations.
+	Ignore []string
+
+	// ToChange is a list of project names that should be changed - that is, any
+	// versions specified for those projects in the root lock file should be
+	// ignored.
+	//
+	// Passing ChangeAll has subtly different behavior from enumerating all
+	// projects into ToChange. In general, ToChange should *only* be used if the
+	// user expressly requested an upgrade for a specific project.
+	ToChange []ProjectRoot
+
+	// ChangeAll indicates that all projects should be changed - that is, any
+	// versions specified in the root lock file should be ignored.
+	ChangeAll bool
+
+	// Downgrade indicates whether the solver will attempt to upgrade (false) or
+	// downgrade (true) projects that are not locked, or are marked for change.
+	//
+	// Upgrading is, by far, the most typical case. The field is named
+	// 'Downgrade' so that the bool's zero value corresponds to that most
+	// typical case.
+	Downgrade bool
+
+	// Trace controls whether the solver will generate informative trace output
+	// as it moves through the solving process.
+	Trace bool
+
+	// TraceLogger is the logger to use for generating trace output. If Trace is
+	// true but no logger is provided, solving will result in an error.
+	TraceLogger *log.Logger
+}
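+
+// A minimal usage sketch (illustrative; the root dir and import root shown
+// are hypothetical, and sm stands in for an existing SourceManager):
+//
+//	params := SolveParameters{
+//		RootDir:    "/home/user/go/src/example.com/myapp",
+//		ImportRoot: ProjectRoot("example.com/myapp"),
+//		Manifest:   SimpleManifest{},
+//	}
+//	s, err := Prepare(params, sm)
+//	if err != nil {
+//		// handle invalid parameters
+//	}
+//	soln, err := s.Solve()
+//	// on success, soln is the complete solution, usable as a lock file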
+
+// solver is a CDCL-style SAT solver with satisfiability conditions hardcoded to
+// the needs of the Go package management problem space.
+type solver struct {
+	// The current number of attempts made over the course of this solve. This
+	// number increments each time the algorithm completes a backtrack and
+	// starts moving forward again.
+	attempts int
+
+	// SolveParameters are the inputs to the solver. They determine both what
+	// data the solver should operate on, and certain aspects of how solving
+	// proceeds.
+	//
+	// Prepare() validates these, so by the time we have a *solver instance, we
+	// know they're valid.
+	params SolveParameters
+
+	// Logger used exclusively for trace output, if the trace option is set.
+	tl *log.Logger
+
+	// A bridge to the standard SourceManager. The adapter does some local
+	// caching of pre-sorted version lists, as well as translation between the
+	// full-on ProjectIdentifiers that the solver deals with and the simplified
+	// names a SourceManager operates on.
+	b sourceBridge
+
+	// A stack containing projects and packages that are currently "selected" -
+	// that is, they have passed all satisfiability checks, and are part of the
+	// current solution.
+	//
+	// The *selection type is mostly just a dumb data container; the solver
+	// itself is responsible for maintaining that invariant.
+	sel *selection
+
+	// The current list of projects that we need to incorporate into the solution in
+	// order for the solution to be complete. This list is implemented as a
+	// priority queue that places projects least likely to induce errors at the
+	// front, in order to minimize the amount of backtracking required to find a
+	// solution.
+	//
+	// Entries are added to and removed from this list by the solver at the same
+	// time that the selected queue is updated, either with an addition or
+	// removal.
+	unsel *unselected
+
+	// Map of packages to ignore. Derived by converting SolveParameters.Ignore
+	// into a map during solver prep - which also, nicely, deduplicates it.
+	ig map[string]bool
+
+	// A stack of all the currently active versionQueues in the solver. The set
+	// of projects represented here corresponds closely to what's in s.sel,
+	// although s.sel will always contain the root project, and s.vqs never
+	// will. Also, s.vqs is only added to (or popped from during backtracking)
+	// when a new project is selected; it is untouched when new packages are
+	// added to an existing project.
+	vqs []*versionQueue
+
+	// A map of the ProjectRoot (local names) that should be allowed to change
+	chng map[ProjectRoot]struct{}
+
+	// A map of the ProjectRoot (local names) that are currently selected, and
+	// the network name to which they currently correspond.
+	names map[ProjectRoot]string
+
+	// A map of the names listed in the root's lock.
+	rlm map[ProjectIdentifier]LockedProject
+
+	// A normalized, copied version of the root manifest.
+	rm Manifest
+
+	// A normalized, copied version of the root lock.
+	rl Lock
+}
+
+// A Solver is the main workhorse of gps: given a set of project inputs, it
+// performs a constraint solving analysis to develop a complete Solution that can
+// be used as a lock file, and to populate a vendor directory.
+type Solver interface {
+	HashInputs() ([]byte, error)
+	Solve() (Solution, error)
+}
+
+// Prepare readies a Solver for use.
+//
+// This function reads and validates the provided SolveParameters. If a problem
+// with the inputs is detected, an error is returned. Otherwise, a Solver is
+// returned, ready to hash and check inputs or perform a solving run.
+func Prepare(params SolveParameters, sm SourceManager) (Solver, error) {
+	// local overrides would need to be handled first.
+	// TODO(sdboyer) local overrides! heh
+
+	if sm == nil {
+		return nil, badOptsFailure("must provide non-nil SourceManager")
+	}
+	if params.RootDir == "" {
+		return nil, badOptsFailure("params must specify a non-empty root directory")
+	}
+	if params.ImportRoot == "" {
+		return nil, badOptsFailure("params must include a non-empty import root")
+	}
+	if params.Trace && params.TraceLogger == nil {
+		return nil, badOptsFailure("trace requested, but no logger provided")
+	}
+
+	if params.Manifest == nil {
+		params.Manifest = SimpleManifest{}
+	}
+
+	// Ensure the ignore map is at least initialized
+	ig := make(map[string]bool)
+	if len(params.Ignore) > 0 {
+		for _, pkg := range params.Ignore {
+			ig[pkg] = true
+		}
+	}
+
+	s := &solver{
+		params: params,
+		ig:     ig,
+		tl:     params.TraceLogger,
+	}
+
+	// Set up the bridge and ensure the root dir is in good, working order
+	// before doing anything else. (This call is stubbed out in tests, via
+	// overriding mkBridge(), so we can run with virtual RootDir.)
+	s.b = mkBridge(s, sm)
+	err := s.b.verifyRootDir(s.params.RootDir)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize maps
+	s.chng = make(map[ProjectRoot]struct{})
+	s.rlm = make(map[ProjectIdentifier]LockedProject)
+	s.names = make(map[ProjectRoot]string)
+
+	for _, v := range s.params.ToChange {
+		s.chng[v] = struct{}{}
+	}
+
+	// Initialize stacks and queues
+	s.sel = &selection{
+		deps: make(map[ProjectIdentifier][]dependency),
+		sm:   s.b,
+	}
+	s.unsel = &unselected{
+		sl:  make([]bimodalIdentifier, 0),
+		cmp: s.unselectedComparator,
+	}
+
+	// Prep safe, normalized versions of root manifest and lock data
+	s.rm = prepManifest(s.params.Manifest)
+	if s.params.Lock != nil {
+		for _, lp := range s.params.Lock.Projects() {
+			s.rlm[lp.Ident().normalize()] = lp
+		}
+
+		// Also keep a prepped one, mostly for the bridge. This is probably
+		// wasteful, but only minimally so, and yay symmetry
+		s.rl = prepLock(s.params.Lock)
+	}
+
+	return s, nil
+}
+
+// Solve attempts to find a dependency solution for the given project, as
+// represented by the SolveParameters with which this Solver was created.
+//
+// This is the entry point to the main gps workhorse.
+func (s *solver) Solve() (Solution, error) {
+	// Prime the queues with the root project
+	err := s.selectRoot()
+	if err != nil {
+		// TODO(sdboyer) this properly with errs, yar
+		return nil, err
+	}
+
+	// Log initial step
+	s.logSolve()
+	all, err := s.solve()
+
+	// Solver finished with an err; return that and we're done
+	if err != nil {
+		return nil, err
+	}
+
+	r := solution{
+		att: s.attempts,
+	}
+
+	// An err here is impossible at this point; we already know the root tree is
+	// fine
+	r.hd, _ = s.HashInputs()
+
+	// Convert ProjectAtoms into LockedProjects
+	r.p = make([]LockedProject, len(all))
+	k := 0
+	for pa, pl := range all {
+		r.p[k] = pa2lp(pa, pl)
+		k++
+	}
+
+	return r, nil
+}
+
+// solve is the top-level loop for the SAT solving process.
+func (s *solver) solve() (map[atom]map[string]struct{}, error) {
+	// Main solving loop
+	for {
+		bmi, has := s.nextUnselected()
+
+		if !has {
+			// no more packages to select - we're done.
+			break
+		}
+
+		// This split is the heart of "bimodal solving": we follow different
+		// satisfiability and selection paths depending on whether we've already
+		// selected the base project/repo that came off the unselected queue.
+		//
+		// (If we already have selected the project, other parts of the
+		// algorithm guarantee the bmi will contain at least one package from
+		// this project that has yet to be selected.)
+		if awp, is := s.sel.selected(bmi.id); !is {
+			// Analysis path for when we haven't selected the project yet - need
+			// to create a version queue.
+			s.logStart(bmi)
+			queue, err := s.createVersionQueue(bmi)
+			if err != nil {
+				// Err means a failure somewhere down the line; try backtracking.
+				if s.backtrack() {
+					// backtracking succeeded, move to the next unselected id
+					continue
+				}
+				return nil, err
+			}
+
+			if queue.current() == nil {
+				panic("canary - queue is empty, but flow indicates success")
+			}
+
+			s.selectAtomWithPackages(atomWithPackages{
+				a: atom{
+					id: queue.id,
+					v:  queue.current(),
+				},
+				pl: bmi.pl,
+			})
+			s.vqs = append(s.vqs, queue)
+			s.logSolve()
+		} else {
+			// We're just trying to add packages to an already-selected project.
+			// That means it's not OK to burn through the version queue for that
+			// project as we do when first selecting a project, as doing so
+			// would upend the guarantees on which all previous selections of
+			// the project are based (both the initial one, and any package-only
+			// ones).
+
+			// Because we can only safely operate within the scope of the
+			// single, currently selected version, we can skip looking for the
+			// queue and just use the version given in what came back from
+			// s.sel.selected().
+			nawp := atomWithPackages{
+				a: atom{
+					id: bmi.id,
+					v:  awp.a.v,
+				},
+				pl: bmi.pl,
+			}
+
+			s.logStart(bmi) // TODO(sdboyer) different special start logger for this path
+			err := s.checkPackage(nawp)
+			if err != nil {
+				// Err means a failure somewhere down the line; try backtracking.
+				if s.backtrack() {
+					// backtracking succeeded, move to the next unselected id
+					continue
+				}
+				return nil, err
+			}
+			s.selectPackages(nawp)
+			// We don't add anything to the stack of version queues because the
+			// backtracker knows not to pop the vqstack if it backtracks
+			// across a pure-package addition.
+			s.logSolve()
+		}
+	}
+
+	// Getting this far means we successfully found a solution. Combine the
+	// selected projects and packages.
+	projs := make(map[atom]map[string]struct{})
+
+	// Skip the first project. It's always the root, and that shouldn't be
+	// included in results.
+	for _, sel := range s.sel.projects[1:] {
+		pm, exists := projs[sel.a.a]
+		if !exists {
+			pm = make(map[string]struct{})
+			projs[sel.a.a] = pm
+		}
+
+		for _, path := range sel.a.pl {
+			pm[path] = struct{}{}
+		}
+	}
+	return projs, nil
+}
+
+// selectRoot is a specialized selectAtomWithPackages, used solely to initially
+// populate the queues at the beginning of a solve run.
+func (s *solver) selectRoot() error {
+	pa := atom{
+		id: ProjectIdentifier{
+			ProjectRoot: s.params.ImportRoot,
+		},
+		// This is a hack so that the root project doesn't have a nil version.
+		// It's sort of OK because the root never makes it out into the results.
+		// We may need a more elegant solution if we discover other side
+		// effects, though.
+		v: Revision(""),
+	}
+
+	ptree, err := s.b.listPackages(pa.id, nil)
+	if err != nil {
+		return err
+	}
+
+	list := make([]string, len(ptree.Packages))
+	k := 0
+	for path := range ptree.Packages {
+		list[k] = path
+		k++
+	}
+
+	a := atomWithPackages{
+		a:  pa,
+		pl: list,
+	}
+
+	// Push the root project onto the queue.
+	// TODO(sdboyer) maybe it'd just be better to skip this?
+	s.sel.pushSelection(a, true)
+
+	// If we're looking for root's deps, get them from opts and local root
+	// analysis, rather than having the sm do it
+	mdeps := append(s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()...)
+
+	// Err is not possible at this point, as it could only come from
+	// listPackages(), which already succeeded for root if we've gotten here
+	reach, _ := s.b.computeRootReach()
+
+	deps, err := s.intersectConstraintsWithImports(mdeps, reach)
+	if err != nil {
+		// TODO(sdboyer) this could well happen; handle it with a more graceful error
+		panic(fmt.Sprintf("shouldn't be possible %s", err))
+	}
+
+	for _, dep := range deps {
+		s.sel.pushDep(dependency{depender: pa, dep: dep})
+		// Add all to unselected queue
+		s.names[dep.Ident.ProjectRoot] = dep.Ident.netName()
+		heap.Push(s.unsel, bimodalIdentifier{id: dep.Ident, pl: dep.pl, fromRoot: true})
+	}
+
+	return nil
+}
+
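+// getImportsAndConstraintsOf computes the import-induced dependencies of the
+// packages in the given atomWithPackages, intersected with any constraints
+// declared in the atom's own manifest.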
+func (s *solver) getImportsAndConstraintsOf(a atomWithPackages) ([]completeDep, error) {
+	var err error
+
+	if s.params.ImportRoot == a.a.id.ProjectRoot {
+		panic("Should never need to recheck imports/constraints from root during solve")
+	}
+
+	// Work through the source manager to get project info and static analysis
+	// information.
+	m, _, err := s.b.getProjectInfo(a.a)
+	if err != nil {
+		return nil, err
+	}
+
+	ptree, err := s.b.listPackages(a.a.id, a.a.v)
+	if err != nil {
+		return nil, err
+	}
+
+	allex := ptree.ExternalReach(false, false, s.ig)
+	// Use a map to dedupe the unique external packages
+	exmap := make(map[string]struct{})
+	// Add the packages reached by the packages explicitly listed in the atom to
+	// the list
+	for _, pkg := range a.pl {
+		expkgs, exists := allex[pkg]
+		if !exists {
+			// missing package here *should* only happen if the target pkg was
+			// poisoned somehow - check the original ptree.
+			if perr, exists := ptree.Packages[pkg]; exists {
+				if perr.Err != nil {
+					return nil, fmt.Errorf("package %s has errors: %s", pkg, perr.Err)
+				}
+				return nil, fmt.Errorf("package %s depends on some other package within %s with errors", pkg, a.a.id.errString())
+			}
+			// Nope, it's actually not there. This shouldn't happen.
+			return nil, fmt.Errorf("package %s does not exist within project %s", pkg, a.a.id.errString())
+		}
+
+		for _, ex := range expkgs {
+			exmap[ex] = struct{}{}
+		}
+	}
+
+	reach := make([]string, len(exmap))
+	k := 0
+	for pkg := range exmap {
+		reach[k] = pkg
+		k++
+	}
+
+	deps := m.DependencyConstraints()
+	// TODO(sdboyer) add overrides here...if we impl the concept (which we should)
+
+	return s.intersectConstraintsWithImports(deps, reach)
+}
+
+// intersectConstraintsWithImports takes a list of constraints and a list of
+// externally reached packages, and creates a []completeDep that is guaranteed
+// to include all packages named by import reach, using constraints where they
+// are available, or Any() where they are not.
+func (s *solver) intersectConstraintsWithImports(deps []ProjectConstraint, reach []string) ([]completeDep, error) {
+	// Create a radix tree with all the projects we know from the manifest
+	// TODO(sdboyer) make this smarter once we allow non-root inputs as 'projects'
+	xt := radix.New()
+	for _, dep := range deps {
+		xt.Insert(string(dep.Ident.ProjectRoot), dep)
+	}
+
+	// Step through the reached packages; if they have prefix matches in
+	// the trie, assume (mostly) it's a correct correspondence.
+	dmap := make(map[ProjectRoot]completeDep)
+	for _, rp := range reach {
+		// If it's a stdlib package, skip it.
+		// TODO(sdboyer) this just hardcodes us to the packages in tip - should we
+		// have go version magic here, too?
+		if stdlib[rp] {
+			continue
+		}
+
+		// Look for a prefix match; it'll be the root project/repo containing
+		// the reached package
+		if k, idep, match := xt.LongestPrefix(rp); match {
+			// The radix tree gets it mostly right, but we have to guard against
+			// possibilities like this:
+			//
+			// github.com/sdboyer/foo
+			// github.com/sdboyer/foobar/baz
+			//
+			// The latter would incorrectly be conflated with the former. So, as
+			// we know we're operating on strings that describe paths, guard
+			// against this case by verifying that either the input is the same
+			// length as the match (in which case we know they're equal), or
+			// that the next character is the PathSeparator.
+			if len(k) == len(rp) || strings.IndexRune(rp[len(k):], os.PathSeparator) == 0 {
+				// Match is valid; put it in the dmap, either creating a new
+				// completeDep or appending it to the existing one for this base
+				// project/prefix.
+				dep := idep.(ProjectConstraint)
+				if cdep, exists := dmap[dep.Ident.ProjectRoot]; exists {
+					cdep.pl = append(cdep.pl, rp)
+					dmap[dep.Ident.ProjectRoot] = cdep
+				} else {
+					dmap[dep.Ident.ProjectRoot] = completeDep{
+						ProjectConstraint: dep,
+						pl:                []string{rp},
+					}
+				}
+				continue
+			}
+		}
+
+		// No match. Let the SourceManager try to figure out the root
+		root, err := s.b.deduceRemoteRepo(rp)
+		if err != nil {
+			// Nothing we can do if we can't suss out a root
+			return nil, err
+		}
+
+		// Still no matches; make a new completeDep with an open constraint
+		pd := ProjectConstraint{
+			Ident: ProjectIdentifier{
+				ProjectRoot: ProjectRoot(root.Base),
+				NetworkName: root.Base,
+			},
+			Constraint: Any(),
+		}
+
+		// Insert the pd into the trie so that further deps from this
+		// project get caught by the prefix search
+		xt.Insert(root.Base, pd)
+		// And also put the complete dep into the dmap
+		dmap[ProjectRoot(root.Base)] = completeDep{
+			ProjectConstraint: pd,
+			pl:                []string{rp},
+		}
+	}
+
+	// Dump all the deps from the map into the expected return slice
+	cdeps := make([]completeDep, len(dmap))
+	k := 0
+	for _, cdep := range dmap {
+		cdeps[k] = cdep
+		k++
+	}
+
+	return cdeps, nil
+}
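+
+// As an illustration (hypothetical inputs): given a manifest constraint on
+// github.com/foo/bar and reached packages github.com/foo/bar/baz and
+// github.com/qux/quux, the result pairs bar's constraint with the package
+// list ["github.com/foo/bar/baz"], while github.com/qux/quux gets its own
+// completeDep constrained to Any().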
+
+func (s *solver) createVersionQueue(bmi bimodalIdentifier) (*versionQueue, error) {
+	id := bmi.id
+	// If on the root package, there's no queue to make
+	if s.params.ImportRoot == id.ProjectRoot {
+		return newVersionQueue(id, nil, nil, s.b)
+	}
+
+	exists, err := s.b.repoExists(id)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		exists, err = s.b.vendorCodeExists(id)
+		if err != nil {
+			return nil, err
+		}
+		if exists {
+			// Project exists only in vendor (and in some manifest somewhere)
+			// TODO(sdboyer) mark this for special handling, somehow?
+		} else {
+			return nil, newSolveError(fmt.Sprintf("Project '%s' could not be located.", id.errString()), cannotResolve)
+		}
+	}
+
+	var lockv Version
+	if len(s.rlm) > 0 {
+		lockv, err = s.getLockVersionIfValid(id)
+		if err != nil {
+			// Can only get an error here if an upgrade was expressly requested on
+			// code that exists only in vendor
+			return nil, err
+		}
+	}
+
+	var prefv Version
+	if bmi.fromRoot {
+		// If this bmi came from the root, then we want to search through things
+		// with a dependency on it in order to see if any have a lock that might
+		// express a prefv
+		//
+		// TODO(sdboyer) nested loop; prime candidate for a cache somewhere
+		for _, dep := range s.sel.getDependenciesOn(bmi.id) {
+			// Skip the root, of course
+			if s.params.ImportRoot == dep.depender.id.ProjectRoot {
+				continue
+			}
+
+			_, l, err := s.b.getProjectInfo(dep.depender)
+			if err != nil || l == nil {
+				// err being non-nil really shouldn't be possible, but the lock
+				// being nil is quite likely
+				continue
+			}
+
+			for _, lp := range l.Projects() {
+				if lp.Ident().eq(bmi.id) {
+					prefv = lp.Version()
+				}
+			}
+		}
+
+		// OTHER APPROACH - WRONG, BUT MAYBE USEFUL FOR REFERENCE?
+		// If this bmi came from the root, then we want to search the unselected
+		// queue to see if anything *else* wants this ident, in which case we
+		// pick up that prefv
+		//for _, bmi2 := range s.unsel.sl {
+		//// Take the first thing from the queue that's for the same ident,
+		//// and has a non-nil prefv
+		//if bmi.id.eq(bmi2.id) {
+		//if bmi2.prefv != nil {
+		//prefv = bmi2.prefv
+		//}
+		//}
+		//}
+
+	} else {
+		// Otherwise, just use the preferred version expressed in the bmi
+		prefv = bmi.prefv
+	}
+
+	q, err := newVersionQueue(id, lockv, prefv, s.b)
+	if err != nil {
+		// TODO(sdboyer) this particular err case needs to be improved to be ONLY for cases
+		// where there's absolutely nothing findable about a given project name
+		return nil, err
+	}
+
+	// Hack in support for revisions.
+	//
+	// By design, revs aren't returned from ListVersions(). Thus, if the dep in
+	// the bmi has a rev constraint, it is (almost) guaranteed to fail, even
+	// if that rev does exist in the repo. So, detect a rev and push it into the
+	// vq here, instead.
+	//
+	// Happily, the solver maintains the invariant that constraints on a given
+	// ident cannot be incompatible, so we know that if we find one rev, then
+	// any other deps will have to also be on that rev (or Any).
+	//
+	// TODO(sdboyer) while this does work, it bypasses the interface-implied guarantees
+	// of the version queue, and is therefore not a great strategy for API
+	// coherency. Folding this in to a formal interface would be better.
+	switch tc := s.sel.getConstraint(bmi.id).(type) {
+	case Revision:
+		// We know this is the only thing that could possibly match, so put it
+		// in at the front - if it isn't there already.
+		if q.pi[0] != tc {
+			// Existence of the revision is guaranteed by checkRevisionExists().
+			q.pi = append([]Version{tc}, q.pi...)
+		}
+	}
+
+	// Having assembled the queue, search it for a valid version.
+	return q, s.findValidVersion(q, bmi.pl)
+}
+
+// findValidVersion walks through a versionQueue until it finds a version that
+// satisfies the constraints held in the current state of the solver.
+//
+// The satisfiability checks triggered from here are constrained to operate only
+// on those dependencies induced by the list of packages given in the second
+// parameter.
+func (s *solver) findValidVersion(q *versionQueue, pl []string) error {
+	if nil == q.current() {
+		// This case should not be reachable; if it is, the solver state is
+		// invalid, so panic immediately.
+		panic("version queue is empty, should not happen")
+	}
+
+	faillen := len(q.fails)
+
+	for {
+		cur := q.current()
+		err := s.checkProject(atomWithPackages{
+			a: atom{
+				id: q.id,
+				v:  cur,
+			},
+			pl: pl,
+		})
+		if err == nil {
+			// we have a good version, can return safely
+			return nil
+		}
+
+		if q.advance(err) != nil {
+			// Error on advance, have to bail out
+			break
+		}
+		if q.isExhausted() {
+			// Queue is empty, bail with error
+			break
+		}
+	}
+
+	s.fail(s.sel.getDependenciesOn(q.id)[0].depender.id)
+
+	// Return a compound error of all the new errors encountered during this
+	// attempt to find a new, valid version
+	return &noVersionError{
+		pn:    q.id,
+		fails: q.fails[faillen:],
+	}
+}
+
+// getLockVersionIfValid finds an atom for the given ProjectIdentifier from the
+// root lock, assuming:
+//
+// 1. A root lock was provided
+// 2. The general flag to change all projects was not passed
+// 3. A flag to change this particular ProjectIdentifier was not passed
+//
+// If any of these three conditions is not met (or if the id cannot be found
+// in the root lock), then no atom will be returned.
+func (s *solver) getLockVersionIfValid(id ProjectIdentifier) (Version, error) {
+	// If the project is specifically marked for changes, then don't look for a
+	// locked version.
+	if _, explicit := s.chng[id.ProjectRoot]; explicit || s.params.ChangeAll {
+		// For projects with an upstream or cache repository, it's safe to
+		// ignore what's in the lock, because there's presumably more versions
+		// to be found and attempted in the repository. If it's only in vendor,
+		// though, then we have to try to use what's in the lock, because that's
+		// the only version we'll be able to get.
+		if exist, _ := s.b.repoExists(id); exist {
+			return nil, nil
+		}
+
+		// However, if a change was *expressly* requested for something that
+		// exists only in vendor, then that guarantees we don't have enough
+		// information to complete a solution. In that case, error out.
+		if explicit {
+			return nil, &missingSourceFailure{
+				goal: id,
+				prob: "Cannot upgrade %s, as no source repository could be found.",
+			}
+		}
+	}
+
+	lp, exists := s.rlm[id]
+	if !exists {
+		return nil, nil
+	}
+
+	constraint := s.sel.getConstraint(id)
+	v := lp.Version()
+	if !constraint.Matches(v) {
+		var found bool
+		if tv, ok := v.(Revision); ok {
+			// If we only have a revision from the root's lock, allow matching
+			// against other versions that have that revision
+			for _, pv := range s.b.pairRevision(id, tv) {
+				if constraint.Matches(pv) {
+					v = pv
+					found = true
+					break
+				}
+			}
+			//} else if _, ok := constraint.(Revision); ok {
+			//// If the current constraint is itself a revision, and the lock gave
+			//// an unpaired version, see if they match up
+			////
+			//if u, ok := v.(UnpairedVersion); ok {
+			//pv := s.sm.pairVersion(id, u)
+			//if constraint.Matches(pv) {
+			//v = pv
+			//found = true
+			//}
+			//}
+		}
+
+		if !found {
+			s.logSolve("%s in root lock, but current constraints disallow it", id.errString())
+			return nil, nil
+		}
+	}
+
+	s.logSolve("using root lock's version of %s", id.errString())
+
+	return v, nil
+}
+
+// backtrack works backwards from the current failed solution to find the next
+// solution to try.
+func (s *solver) backtrack() bool {
+	if len(s.vqs) == 0 {
+		// nothing to backtrack to
+		return false
+	}
+
+	for {
+		for {
+			if len(s.vqs) == 0 {
+				// no more versions, nowhere further to backtrack
+				return false
+			}
+			if s.vqs[len(s.vqs)-1].failed {
+				break
+			}
+
+			s.vqs, s.vqs[len(s.vqs)-1] = s.vqs[:len(s.vqs)-1], nil
+
+			// Pop selections off until we get to a project.
+			var proj bool
+			for !proj {
+				_, proj = s.unselectLast()
+			}
+		}
+
+		// Grab the last versionQueue off the list of queues
+		q := s.vqs[len(s.vqs)-1]
+		// Walk back to the next project
+		var awp atomWithPackages
+		var proj bool
+
+		for !proj {
+			awp, proj = s.unselectLast()
+		}
+
+		if !q.id.eq(awp.a.id) {
+			panic("canary - version queue stack and selected project stack are out of alignment")
+		}
+
+		// Advance the queue past the current version, which we know is bad
+		// TODO(sdboyer) is it feasible to make available the failure reason here?
+		if q.advance(nil) == nil && !q.isExhausted() {
+			// Search for another acceptable version of this failed dep in its queue
+			if s.findValidVersion(q, awp.pl) == nil {
+				s.logSolve()
+
+				// Found one! Put it back on the selected queue and stop
+				// backtracking
+				s.selectAtomWithPackages(atomWithPackages{
+					a: atom{
+						id: q.id,
+						v:  q.current(),
+					},
+					pl: awp.pl,
+				})
+				break
+			}
+		}
+
+		s.logSolve("no more versions of %s, backtracking", q.id.errString())
+
+		// No solution found; continue backtracking after popping the queue
+		// we just inspected off the list
+		// GC-friendly pop pointer elem in slice
+		s.vqs, s.vqs[len(s.vqs)-1] = s.vqs[:len(s.vqs)-1], nil
+	}
+
+	// Backtracking was successful if loop ended before running out of versions
+	if len(s.vqs) == 0 {
+		return false
+	}
+	s.attempts++
+	return true
+}
+
+func (s *solver) nextUnselected() (bimodalIdentifier, bool) {
+	if len(s.unsel.sl) > 0 {
+		return s.unsel.sl[0], true
+	}
+
+	return bimodalIdentifier{}, false
+}
+
+func (s *solver) unselectedComparator(i, j int) bool {
+	ibmi, jbmi := s.unsel.sl[i], s.unsel.sl[j]
+	iname, jname := ibmi.id, jbmi.id
+
+	// Most important thing is pushing package additions ahead of project
+	// additions. Package additions can't walk their version queue, so all they
+	// do is narrow the possibility of success; better to find out early and
+	// fast if they're going to fail than wait until after we've done real work
+	// on a project and have to backtrack across it.
+
+	// FIXME the impl here is currently O(n) in the number of selections; it
+	// absolutely cannot stay in a hot sorting path like this
+	_, isel := s.sel.selected(iname)
+	_, jsel := s.sel.selected(jname)
+
+	if isel && !jsel {
+		return true
+	}
+	if !isel && jsel {
+		return false
+	}
+
+	if iname.eq(jname) {
+		return false
+	}
+
+	_, ilock := s.rlm[iname]
+	_, jlock := s.rlm[jname]
+
+	switch {
+	case ilock && !jlock:
+		return true
+	case !ilock && jlock:
+		return false
+	case ilock && jlock:
+		return iname.less(jname)
+	}
+
+	// Now, sort by number of available versions. This will trigger network
+	// activity, but at this point we know that the project we're looking at
+	// isn't locked by the root. And, because being locked by root is the only
+	// way to avoid that call when making a version queue, we know we're going
+	// to have to pay that cost anyway.
+
+	// We can safely ignore an err from ListVersions here because, if there is
+	// an actual problem, it'll be noted and handled somewhere saner in the
+	// solving algorithm.
+	ivl, _ := s.b.listVersions(iname)
+	jvl, _ := s.b.listVersions(jname)
+	iv, jv := len(ivl), len(jvl)
+
+	// Packages with fewer versions to pick from are less likely to benefit from
+	// backtracking, so deal with them earlier in order to minimize the amount
+	// of superfluous backtracking through them we do.
+	switch {
+	case iv == 0 && jv != 0:
+		return true
+	case iv != 0 && jv == 0:
+		return false
+	case iv != jv:
+		return iv < jv
+	}
+
+	// Finally, if all else fails, fall back to comparing by name
+	return iname.less(jname)
+}
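+
+// Net effect of the comparator: already-selected ids (package-only additions)
+// sort first, then ids locked in the root lock, then ids with fewer available
+// versions, with name order as the final tiebreaker.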
+
+func (s *solver) fail(id ProjectIdentifier) {
+	// TODO(sdboyer) does this need updating, now that we have non-project package
+	// selection?
+
+	// skip if the root project
+	if s.params.ImportRoot != id.ProjectRoot {
+		// just look for the first (oldest) one; the backtracker will necessarily
+		// traverse through and pop off any earlier ones
+		for _, vq := range s.vqs {
+			if vq.id.eq(id) {
+				vq.failed = true
+				return
+			}
+		}
+	}
+}
+
+// selectAtomWithPackages handles the selection case where a new project is
+// being added to the selection queue, alongside some number of its contained
+// packages. This method pushes them onto the selection queue, then adds any
+// new resultant deps to the unselected queue.
+func (s *solver) selectAtomWithPackages(a atomWithPackages) {
+	s.unsel.remove(bimodalIdentifier{
+		id: a.a.id,
+		pl: a.pl,
+	})
+
+	s.sel.pushSelection(a, true)
+
+	deps, err := s.getImportsAndConstraintsOf(a)
+	if err != nil {
+		// This shouldn't be possible; other checks should have ensured all
+		// packages and deps are present for any argument passed to this method.
+		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
+	}
+
+	// If this atom has a lock, pull it out so that we can potentially inject
+	// preferred versions into any bmis we enqueue
+	_, l, _ := s.b.getProjectInfo(a.a)
+	var lmap map[ProjectIdentifier]Version
+	if l != nil {
+		lmap = make(map[ProjectIdentifier]Version)
+		for _, lp := range l.Projects() {
+			lmap[lp.Ident()] = lp.Version()
+		}
+	}
+
+	for _, dep := range deps {
+		s.sel.pushDep(dependency{depender: a.a, dep: dep})
+		// Go through all the packages introduced on this dep, selecting only
+		// the ones where the only depper on them is what we pushed in. Then,
+		// put those into the unselected queue.
+		rpm := s.sel.getRequiredPackagesIn(dep.Ident)
+		var newp []string
+		for _, pkg := range dep.pl {
+			if rpm[pkg] == 1 {
+				newp = append(newp, pkg)
+			}
+		}
+
+		if len(newp) > 0 {
+			bmi := bimodalIdentifier{
+				id: dep.Ident,
+				pl: newp,
+				// This puts in a preferred version if one's in the map, else
+				// drops in the zero value (nil)
+				prefv: lmap[dep.Ident],
+			}
+			heap.Push(s.unsel, bmi)
+		}
+
+		if s.sel.depperCount(dep.Ident) == 1 {
+			s.names[dep.Ident.ProjectRoot] = dep.Ident.netName()
+		}
+	}
+}
+
+// selectPackages handles the selection case where we're just adding some new
+// packages to a project that was already selected. After pushing the selection,
+// it adds any newly-discovered deps to the unselected queue.
+//
+// It also takes an atomWithPackages because we need that same information in
+// order to enqueue the selection.
+func (s *solver) selectPackages(a atomWithPackages) {
+	s.unsel.remove(bimodalIdentifier{
+		id: a.a.id,
+		pl: a.pl,
+	})
+
+	s.sel.pushSelection(a, false)
+
+	deps, err := s.getImportsAndConstraintsOf(a)
+	if err != nil {
+		// This shouldn't be possible; other checks should have ensured all
+		// packages and deps are present for any argument passed to this method.
+		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
+	}
+
+	// If this atom has a lock, pull it out so that we can potentially inject
+	// preferred versions into any bmis we enqueue
+	_, l, _ := s.b.getProjectInfo(a.a)
+	var lmap map[ProjectIdentifier]Version
+	if l != nil {
+		lmap = make(map[ProjectIdentifier]Version)
+		for _, lp := range l.Projects() {
+			lmap[lp.Ident()] = lp.Version()
+		}
+	}
+
+	for _, dep := range deps {
+		s.sel.pushDep(dependency{depender: a.a, dep: dep})
+		// Go through all the packages introduced on this dep, selecting only
+		// the ones where the only depper on them is what we pushed in. Then,
+		// put those into the unselected queue.
+		rpm := s.sel.getRequiredPackagesIn(dep.Ident)
+		var newp []string
+		for _, pkg := range dep.pl {
+			if rpm[pkg] == 1 {
+				newp = append(newp, pkg)
+			}
+		}
+
+		if len(newp) > 0 {
+			bmi := bimodalIdentifier{
+				id: dep.Ident,
+				pl: newp,
+				// This puts in a preferred version if one's in the map, else
+				// drops in the zero value (nil)
+				prefv: lmap[dep.Ident],
+			}
+			heap.Push(s.unsel, bmi)
+		}
+
+		if s.sel.depperCount(dep.Ident) == 1 {
+			s.names[dep.Ident.ProjectRoot] = dep.Ident.netName()
+		}
+	}
+}
+
+func (s *solver) unselectLast() (atomWithPackages, bool) {
+	awp, first := s.sel.popSelection()
+	heap.Push(s.unsel, bimodalIdentifier{id: awp.a.id, pl: awp.pl})
+
+	deps, err := s.getImportsAndConstraintsOf(awp)
+	if err != nil {
+		// This shouldn't be possible; other checks should have ensured all
+		// packages and deps are present for any argument passed to this method.
+		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
+	}
+
+	for _, dep := range deps {
+		s.sel.popDep(dep.Ident)
+
+		// if no parents/importers, remove from unselected queue
+		if s.sel.depperCount(dep.Ident) == 0 {
+			delete(s.names, dep.Ident.ProjectRoot)
+			s.unsel.remove(bimodalIdentifier{id: dep.Ident, pl: dep.pl})
+		}
+	}
+
+	return awp, first
+}
+
+func (s *solver) logStart(bmi bimodalIdentifier) {
+	if !s.params.Trace {
+		return
+	}
+
+	prefix := strings.Repeat("| ", len(s.vqs)+1)
+	// TODO(sdboyer) how...to list the packages in the limited space we have?
+	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("? attempting %s (with %v packages)", bmi.id.errString(), len(bmi.pl)), prefix, prefix))
+}
+
+func (s *solver) logSolve(args ...interface{}) {
+	if !s.params.Trace {
+		return
+	}
+
+	preflen := len(s.vqs)
+	var msg string
+	if len(args) == 0 {
+		// Generate message based on current solver state
+		if len(s.vqs) == 0 {
+			msg = "✓ (root)"
+		} else {
+			vq := s.vqs[len(s.vqs)-1]
+			msg = fmt.Sprintf("✓ select %s at %s", vq.id.errString(), vq.current())
+		}
+	} else {
+		// Use longer prefix length for these cases, as they're the intermediate
+		// work
+		preflen++
+		switch data := args[0].(type) {
+		case string:
+			msg = tracePrefix(fmt.Sprintf(data, args[1:]...), "| ", "| ")
+		case traceError:
+			// We got a special traceError, use its custom method
+			msg = tracePrefix(data.traceString(), "| ", "✗ ")
+		case error:
+			// Regular error; still use the ✗ leader, but with the default
+			// Error() string
+			msg = tracePrefix(data.Error(), "| ", "✗ ")
+		default:
+			// panic here because this can *only* mean a stupid internal bug
+			panic("canary - must pass a string as first arg to logSolve, or no args at all")
+		}
+	}
+
+	prefix := strings.Repeat("| ", preflen)
+	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
+}
+
+func tracePrefix(msg, sep, fsep string) string {
+	parts := strings.Split(strings.TrimSuffix(msg, "\n"), "\n")
+	for k, str := range parts {
+		if k == 0 {
+			parts[k] = fmt.Sprintf("%s%s", fsep, str)
+		} else {
+			parts[k] = fmt.Sprintf("%s%s", sep, str)
+		}
+	}
+
+	return strings.Join(parts, "\n")
+}
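+
+// For example, tracePrefix("a\nb", "| ", "✗ ") returns "✗ a\n| b": the first
+// line gets the fsep leader, and each subsequent line gets sep.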
+
+// simple (temporary?) helper just to convert atoms into locked projects
+func pa2lp(pa atom, pkgs map[string]struct{}) LockedProject {
+	lp := LockedProject{
+		pi: pa.id.normalize(), // shouldn't be necessary, but normalize just in case
+	}
+
+	switch v := pa.v.(type) {
+	case UnpairedVersion:
+		lp.v = v
+	case Revision:
+		lp.r = v
+	case versionPair:
+		lp.v = v.v
+		lp.r = v.r
+	default:
+		panic("unreachable")
+	}
+
+	for pkg := range pkgs {
+		lp.pkgs = append(lp.pkgs, strings.TrimPrefix(pkg, string(pa.id.ProjectRoot)+string(os.PathSeparator)))
+	}
+	sort.Strings(lp.pkgs)
+
+	return lp
+}
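+
+// For example (hypothetical values): pa2lp on an atom rooted at
+// github.com/foo/bar with pkgs {"github.com/foo/bar/baz"} records the package
+// list as ["baz"] - paths are stored relative to the project root.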
diff --git a/vendor/github.com/sdboyer/gps/source_manager.go b/vendor/github.com/sdboyer/gps/source_manager.go
new file mode 100644
index 0000000..86627a1
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/source_manager.go
@@ -0,0 +1,297 @@
+package gps
+
+import (
+	"encoding/json"
+	"fmt"
+	"go/build"
+	"os"
+	"path"
+
+	"github.com/Masterminds/vcs"
+)
+
+// A SourceManager is responsible for retrieving, managing, and interrogating
+// source repositories. Its primary purpose is to serve the needs of a Solver,
+// but it is handy for other purposes, as well.
+//
+// gps's built-in SourceManager, accessible via NewSourceManager(), is
+// intended to be generic and sufficient for any purpose. It provides some
+// additional semantics around the methods defined here.
+type SourceManager interface {
+	// RepoExists checks if a repository exists, either upstream or in the
+	// SourceManager's central repository cache.
+	RepoExists(ProjectRoot) (bool, error)
+
+	// ListVersions retrieves a list of the available versions for a given
+	// repository name.
+	ListVersions(ProjectRoot) ([]Version, error)
+
+	// RevisionPresentIn indicates whether the provided Revision is present in
+	// the given repository.
+	RevisionPresentIn(ProjectRoot, Revision) (bool, error)
+
+	// ListPackages retrieves a tree of the Go packages at or below the provided
+	// import path, at the provided version.
+	ListPackages(ProjectRoot, Version) (PackageTree, error)
+
+	// GetProjectInfo returns manifest and lock information for the provided
+	// import path. gps currently requires that projects be rooted at their
+	// repository root, which means that this ProjectRoot must also be a
+	// repository root.
+	GetProjectInfo(ProjectRoot, Version) (Manifest, Lock, error)
+
+	// ExportProject writes out the tree of the provided import path, at the
+	// provided version, to the provided directory.
+	ExportProject(ProjectRoot, Version, string) error
+
+	// Release lets go of any locks held by the SourceManager.
+	Release()
+}
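+
+// A minimal usage sketch (illustrative only - the analyzer value and cache
+// directory below are assumptions, not part of this package):
+//
+//	sm, err := NewSourceManager(myAnalyzer{}, "/tmp/gps-cache", false)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer sm.Release()
+//	versions, err := sm.ListVersions(ProjectRoot("github.com/Masterminds/semver"))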
+
+// A ProjectAnalyzer is responsible for analyzing a path for Manifest and Lock
+// information. Tools relying on gps must implement one.
+type ProjectAnalyzer interface {
+	GetInfo(string, ProjectRoot) (Manifest, Lock, error)
+}
+
+// SourceMgr is the default SourceManager for gps.
+//
+// There's no (planned) reason why it would need to be reimplemented by other
+// tools; control via dependency injection is intended to be sufficient.
+type SourceMgr struct {
+	cachedir string
+	pms      map[ProjectRoot]*pmState
+	an       ProjectAnalyzer
+	ctx      build.Context
+	//pme               map[ProjectRoot]error
+}
+
+var _ SourceManager = &SourceMgr{}
+
+// Holds a projectManager, caches of the managed project's data, and information
+// about the freshness of those caches
+type pmState struct {
+	pm   *projectManager
+	cf   *os.File // handle for the cache file
+	vcur bool     // indicates that we've called ListVersions()
+}
+
+// NewSourceManager produces an instance of gps's built-in SourceManager. It
+// takes a cache directory (where local instances of upstream repositories are
+// stored), a vendor directory for the project currently being worked on, and a
+// force flag indicating whether to overwrite the global cache lock file (if
+// present).
+//
+// The returned SourceManager aggressively caches information wherever possible.
+// If tools need to do preliminary work involving upstream repository analysis
+// prior to invoking a solve run, it is recommended that they create this
+// SourceManager as early as possible and use it to those ends. That way,
+// the solver can benefit from any caches that may have already been warmed.
+//
+// gps's SourceManager is intended to be threadsafe (if it's not, please
+// file a bug!). It should certainly be safe to reuse from one solving run to
+// the next; however, the fact that it takes a basedir as an argument makes it
+// much less useful for simultaneous use by separate solvers operating on
+// different root projects. This architecture may change in the future.
+func NewSourceManager(an ProjectAnalyzer, cachedir string, force bool) (*SourceMgr, error) {
+	if an == nil {
+		return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
+	}
+
+	err := os.MkdirAll(cachedir, 0777)
+	if err != nil {
+		return nil, err
+	}
+
+	glpath := path.Join(cachedir, "sm.lock")
+	_, err = os.Stat(glpath)
+	if err == nil && !force {
+		return nil, fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath)
+	}
+
+	_, err = os.OpenFile(glpath, os.O_CREATE|os.O_RDONLY, 0700) // is 0700 sane for this purpose?
+	if err != nil {
+		return nil, fmt.Errorf("failed to create global cache lock file at %s with err %s", glpath, err)
+	}
+
+	ctx := build.Default
+	// Replace GOPATH with our cache dir
+	ctx.GOPATH = cachedir
+
+	return &SourceMgr{
+		cachedir: cachedir,
+		pms:      make(map[ProjectRoot]*pmState),
+		ctx:      ctx,
+		an:       an,
+	}, nil
+}
+
+// Release lets go of any locks held by the SourceManager.
+func (sm *SourceMgr) Release() {
+	os.Remove(path.Join(sm.cachedir, "sm.lock"))
+}
+
+// GetProjectInfo returns manifest and lock information for the provided import
+// path. gps currently requires that projects be rooted at their repository
+// root, which means that this ProjectRoot must also be a repository root.
+//
+// The work of producing the manifest and lock information is delegated to the
+// injected ProjectAnalyzer.
+func (sm *SourceMgr) GetProjectInfo(n ProjectRoot, v Version) (Manifest, Lock, error) {
+	pmc, err := sm.getProjectManager(n)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return pmc.pm.GetInfoAt(v)
+}
+
+// ListPackages retrieves a tree of the Go packages at or below the provided
+// import path, at the provided version.
+func (sm *SourceMgr) ListPackages(n ProjectRoot, v Version) (PackageTree, error) {
+	pmc, err := sm.getProjectManager(n)
+	if err != nil {
+		return PackageTree{}, err
+	}
+
+	return pmc.pm.ListPackages(v)
+}
+
+// ListVersions retrieves a list of the available versions for a given
+// repository name.
+//
+// The list is not sorted; while it may be returned in the order that the
+// underlying VCS reports version information, no guarantee is made. It is
+// expected that the caller either not care about order, or sort the result
+// themselves.
+//
+// This list is always retrieved from upstream; if upstream is not accessible
+// (network outage, access issues, or the resource actually went away), an error
+// will be returned.
+func (sm *SourceMgr) ListVersions(n ProjectRoot) ([]Version, error) {
+	pmc, err := sm.getProjectManager(n)
+	if err != nil {
+		// TODO(sdboyer) More-er proper-er errors
+		return nil, err
+	}
+
+	return pmc.pm.ListVersions()
+}
+
+// RevisionPresentIn indicates whether the provided Revision is present in the given
+// repository.
+func (sm *SourceMgr) RevisionPresentIn(n ProjectRoot, r Revision) (bool, error) {
+	pmc, err := sm.getProjectManager(n)
+	if err != nil {
+		// TODO(sdboyer) More-er proper-er errors
+		return false, err
+	}
+
+	return pmc.pm.RevisionPresentIn(r)
+}
+
+// RepoExists checks if a repository exists, either upstream or in the cache,
+// for the provided ProjectRoot.
+func (sm *SourceMgr) RepoExists(n ProjectRoot) (bool, error) {
+	pms, err := sm.getProjectManager(n)
+	if err != nil {
+		return false, err
+	}
+
+	return pms.pm.CheckExistence(existsInCache) || pms.pm.CheckExistence(existsUpstream), nil
+}
+
+// ExportProject writes out the tree of the provided import path, at the
+// provided version, to the provided directory.
+func (sm *SourceMgr) ExportProject(n ProjectRoot, v Version, to string) error {
+	pms, err := sm.getProjectManager(n)
+	if err != nil {
+		return err
+	}
+
+	return pms.pm.ExportVersionTo(v, to)
+}
+
+// getProjectManager gets the project manager for the given ProjectRoot.
+//
+// If no such manager yet exists, it attempts to create one.
+func (sm *SourceMgr) getProjectManager(n ProjectRoot) (*pmState, error) {
+	// Check pm cache and errcache first
+	if pm, exists := sm.pms[n]; exists {
+		return pm, nil
+		//} else if pme, errexists := sm.pme[name]; errexists {
+		//return nil, pme
+	}
+
+	repodir := path.Join(sm.cachedir, "src", string(n))
+	// TODO(sdboyer) be more robust about this
+	r, err := vcs.NewRepo("https://"+string(n), repodir)
+	if err != nil {
+		// TODO(sdboyer) be better
+		return nil, err
+	}
+	if !r.CheckLocal() {
+		// TODO(sdboyer) cloning the repo here puts it on a blocking, and possibly
+		// unnecessary path. defer it
+		err = r.Get()
+		if err != nil {
+			// TODO(sdboyer) be better
+			return nil, err
+		}
+	}
+
+	// Ensure cache dir exists
+	metadir := path.Join(sm.cachedir, "metadata", string(n))
+	err = os.MkdirAll(metadir, 0777)
+	if err != nil {
+		// TODO(sdboyer) be better
+		return nil, err
+	}
+
+	pms := &pmState{}
+	cpath := path.Join(metadir, "cache.json")
+	fi, err := os.Stat(cpath)
+	var dc *projectDataCache
+	if fi != nil {
+		pms.cf, err = os.OpenFile(cpath, os.O_RDWR, 0777)
+		if err != nil {
+			// TODO(sdboyer) be better
+			return nil, fmt.Errorf("Err on opening metadata cache file: %s", err)
+		}
+
+		// Decode into a freshly allocated cache; decoding into the nil
+		// pointer declared above would fail.
+		dc = &projectDataCache{}
+		err = json.NewDecoder(pms.cf).Decode(dc)
+		if err != nil {
+			// TODO(sdboyer) be better
+			return nil, fmt.Errorf("Err on JSON decoding metadata cache file: %s", err)
+		}
+	} else {
+		// TODO(sdboyer) commented this out for now, until we manage it correctly
+		//pms.cf, err = os.Create(cpath)
+		//if err != nil {
+		//// TODO(sdboyer) be better
+		//return nil, fmt.Errorf("Err on creating metadata cache file: %s", err)
+		//}
+
+		dc = &projectDataCache{
+			Infos:    make(map[Revision]projectInfo),
+			Packages: make(map[Revision]PackageTree),
+			VMap:     make(map[Version]Revision),
+			RMap:     make(map[Revision][]Version),
+		}
+	}
+
+	pm := &projectManager{
+		n:   n,
+		ctx: sm.ctx,
+		an:  sm.an,
+		dc:  dc,
+		crepo: &repo{
+			rpath: repodir,
+			r:     r,
+		},
+	}
+
+	pms.pm = pm
+	sm.pms[n] = pms
+	return pms, nil
+}
diff --git a/vendor/github.com/sdboyer/gps/types.go b/vendor/github.com/sdboyer/gps/types.go
new file mode 100644
index 0000000..f720fa2
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/types.go
@@ -0,0 +1,195 @@
+package gps
+
+import (
+	"fmt"
+	"math/rand"
+	"strconv"
+)
+
+// ProjectRoot is the topmost import path in a tree of other import paths - the
+// root of the tree. In gps' current design, ProjectRoots have to correspond to
+// a repository root (mostly), but their real purpose is to identify the root
+// import path of a "project", logically encompassing all child packages.
+//
+// Projects are a crucial unit of operation in gps. Constraints are declared by
+// a project's manifest, and apply to all packages in a ProjectRoot's tree.
+// Solving itself mostly proceeds on a project-by-project basis.
+//
+// Aliasing string types is usually a bit of an anti-pattern. We do it here as a
+// means of clarifying API intent. This is important because Go's package
+// management domain has lots of different path-ish strings floating around:
+//
+//  actual directories:
+//	/home/sdboyer/go/src/github.com/sdboyer/gps/example
+//  URLs:
+//	https://github.com/sdboyer/gps
+//  import paths:
+//	github.com/sdboyer/gps/example
+//  portions of import paths that refer to a package:
+//	example
+//  portions that could not possibly refer to anything sane:
+//	github.com/sdboyer
+//  portions that correspond to a repository root:
+//	github.com/sdboyer/gps
+//
+// While not a panacea, defining ProjectRoot at least allows us to clearly
+// identify when one of these path-ish strings is *supposed* to have certain
+// semantics.
+type ProjectRoot string
+
+// A ProjectIdentifier is, more or less, the name of a dependency. It is related
+// to, but differs in two key ways from, an import path.
+//
+// First, ProjectIdentifiers do not identify a single package. Rather, they
+// encompass the whole tree of packages that exist at or below their
+// ProjectRoot. In gps' current design, this ProjectRoot must correspond to the
+// root of a repository, though this may not always be the case.
+//
+// Second, ProjectIdentifiers can optionally carry a NetworkName, which
+// identifies where the underlying source code can be located on the network.
+// These can be either full URLs, including protocol, or plain import paths.
+// So, these are all valid data for NetworkName:
+//
+//  github.com/sdboyer/gps
+//  github.com/fork/gps
+//  git@github.com:sdboyer/gps
+//  https://github.com/sdboyer/gps
+//
+// With plain import paths, network addresses are derived purely through an
+// algorithm. By having an explicit network name, it becomes possible to, for
+// example, transparently substitute a fork for an original upstream repository.
+//
+// Note that gps makes no guarantees about the actual import paths contained in
+// a repository aligning with ImportRoot. If tools, or their users, specify an
+// alternate NetworkName that contains a repository with incompatible internal
+// import paths, gps will fail. (gps does no import rewriting.)
+//
+// Also note that if different projects' manifests report a different
+// NetworkName for a given ImportRoot, it is a solve failure. Everyone has to
+// agree on where a given import path should be sourced from.
+//
+// If NetworkName is not explicitly set, gps will derive the network address from
+// the ImportRoot using a similar algorithm to that of the official go tooling.
+type ProjectIdentifier struct {
+	ProjectRoot ProjectRoot
+	NetworkName string
+}
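+
+// For example (hypothetical values), an identifier that transparently
+// substitutes a fork for the canonical upstream:
+//
+//	pi := ProjectIdentifier{
+//		ProjectRoot: ProjectRoot("github.com/sdboyer/gps"),
+//		NetworkName: "https://github.com/fork/gps",
+//	}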
+
+// A ProjectConstraint combines a ProjectIdentifier with a Constraint. It
+// indicates that, if packages contained in the ProjectIdentifier enter the
+// depgraph, they must do so at a version that is allowed by the Constraint.
+type ProjectConstraint struct {
+	Ident      ProjectIdentifier
+	Constraint Constraint
+}
+
+func (i ProjectIdentifier) less(j ProjectIdentifier) bool {
+	if i.ProjectRoot < j.ProjectRoot {
+		return true
+	}
+	if j.ProjectRoot < i.ProjectRoot {
+		return false
+	}
+
+	return i.NetworkName < j.NetworkName
+}
+
+func (i ProjectIdentifier) eq(j ProjectIdentifier) bool {
+	if i.ProjectRoot != j.ProjectRoot {
+		return false
+	}
+	if i.NetworkName == j.NetworkName {
+		return true
+	}
+
+	if (i.NetworkName == "" && j.NetworkName == string(j.ProjectRoot)) ||
+		(j.NetworkName == "" && i.NetworkName == string(i.ProjectRoot)) {
+		return true
+	}
+
+	// TODO(sdboyer) attempt conversion to URL and compare base + path
+
+	return false
+}
+
+func (i ProjectIdentifier) netName() string {
+	if i.NetworkName == "" {
+		return string(i.ProjectRoot)
+	}
+	return i.NetworkName
+}
+
+func (i ProjectIdentifier) errString() string {
+	if i.NetworkName == "" || i.NetworkName == string(i.ProjectRoot) {
+		return string(i.ProjectRoot)
+	}
+	return fmt.Sprintf("%s (from %s)", i.ProjectRoot, i.NetworkName)
+}
+
+func (i ProjectIdentifier) normalize() ProjectIdentifier {
+	if i.NetworkName == "" {
+		i.NetworkName = string(i.ProjectRoot)
+	}
+
+	return i
+}
+
+// Package represents a Go package. It contains a subset of the information
+// go/build.Package does.
+type Package struct {
+	ImportPath, CommentPath string
+	Name                    string
+	Imports                 []string
+	TestImports             []string
+}
+
+// bimodalIdentifiers are used to track work to be done in the unselected queue.
+// TODO(sdboyer) marker for root, to know to ignore prefv...or can we do unselected queue
+// sorting only?
+type bimodalIdentifier struct {
+	id ProjectIdentifier
+	// List of packages required within/under the ProjectIdentifier
+	pl []string
+	// prefv is used to indicate a 'preferred' version. This is expected to be
+	// derived from a dep's lock data, or else is empty.
+	prefv Version
+	// Indicates that the bmi came from the root project originally
+	fromRoot bool
+}
+
+type atom struct {
+	id ProjectIdentifier
+	v  Version
+}
+
+// With a random revision and no name, collisions are...unlikely
+var nilpa = atom{
+	v: Revision(strconv.FormatInt(rand.Int63(), 36)),
+}
+
+type atomWithPackages struct {
+	a  atom
+	pl []string
+}
+
+//type byImportPath []Package
+
+//func (s byImportPath) Len() int           { return len(s) }
+//func (s byImportPath) Less(i, j int) bool { return s[i].ImportPath < s[j].ImportPath }
+//func (s byImportPath) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// completeDep (name hopefully to change) provides the whole picture of a
+// dependency - the root (repo and project, since currently we assume the two
+// are the same) name, a constraint, and the actual packages needed under that
+// root.
+type completeDep struct {
+	// The base ProjectConstraint
+	ProjectConstraint
+	// The specific packages required from the project
+	pl []string
+}
+
+type dependency struct {
+	depender atom
+	dep      completeDep
+}
diff --git a/vendor/github.com/sdboyer/gps/version.go b/vendor/github.com/sdboyer/gps/version.go
new file mode 100644
index 0000000..57d37ec
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/version.go
@@ -0,0 +1,512 @@
+package gps
+
+import "github.com/Masterminds/semver"
+
+// Version represents one of the different types of versions used by gps.
+//
+// Version composes Constraint, because all versions can be used as a constraint
+// (where they allow one, and only one, version - themselves), but constraints
+// are not necessarily discrete versions.
+//
+// Version is an interface, but it contains private methods, which restricts it
+// to gps's own internal implementations. We do this for the confluence of
+// two reasons: the implementation of Versions is complete (there is no case in
+// which we'd need other types), and the implementation relies on type magic
+// under the hood, which would be unsafe to do if other dynamic types could be
+// hiding behind the interface.
+type Version interface {
+	Constraint
+	// Indicates the type of version - Revision, Branch, Version, or Semver
+	Type() string
+}
+
+// PairedVersion represents a normal Version, but paired with its corresponding,
+// underlying Revision.
+type PairedVersion interface {
+	Version
+	// Underlying returns the immutable Revision that identifies this Version.
+	Underlying() Revision
+	// Ensures it is impossible to be both a PairedVersion and an
+	// UnpairedVersion
+	_pair(int)
+}
+
+// UnpairedVersion represents a normal Version, with a method for creating a
+// VersionPair by indicating the version's corresponding, underlying Revision.
+type UnpairedVersion interface {
+	Version
+	// Is takes the underlying Revision that this UnpairedVersion corresponds
+	// to and unites them into a PairedVersion.
+	Is(Revision) PairedVersion
+	// Ensures it is impossible to be both a PairedVersion and an
+	// UnpairedVersion
+	_pair(bool)
+}
+
+// types are weird
+func (branchVersion) _private()  {}
+func (branchVersion) _pair(bool) {}
+func (plainVersion) _private()   {}
+func (plainVersion) _pair(bool)  {}
+func (semVersion) _private()     {}
+func (semVersion) _pair(bool)    {}
+func (versionPair) _private()    {}
+func (versionPair) _pair(int)    {}
+func (Revision) _private()       {}
+
+// NewBranch creates a new Version to represent a floating version (in
+// general, a branch).
+func NewBranch(body string) UnpairedVersion {
+	return branchVersion(body)
+}
+
+// NewVersion creates a Semver-typed Version if the provided version string is
+// valid semver, and a plain/non-semver version if not.
+func NewVersion(body string) UnpairedVersion {
+	sv, err := semver.NewVersion(body)
+
+	if err != nil {
+		return plainVersion(body)
+	}
+	return semVersion{sv: sv}
+}
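+
+// For example (hypothetical values), constructing versions and pairing them
+// with an underlying revision:
+//
+//	sv := NewVersion("v1.0.0")                   // semver-typed
+//	pv := NewVersion("jazzhands")                // plain/non-semver
+//	b := NewBranch("master").Is(Revision("f00")) // PairedVersion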
+
+// A Revision represents an immutable versioning identifier.
+type Revision string
+
+// String converts the Revision back into a string.
+func (r Revision) String() string {
+	return string(r)
+}
+
+func (r Revision) Type() string {
+	return "rev"
+}
+
+// Matches is the Revision acting as a constraint; it checks to see if the provided
+// version is the same Revision as itself.
+func (r Revision) Matches(v Version) bool {
+	switch tv := v.(type) {
+	case versionTypeUnion:
+		return tv.Matches(r)
+	case Revision:
+		return r == tv
+	case versionPair:
+		return r == tv.r
+	}
+
+	return false
+}
+
+// MatchesAny is the Revision acting as a constraint; it checks whether the
+// provided constraint could admit a version that is this same Revision.
+func (r Revision) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(r)
+	case Revision:
+		return r == tc
+	case versionPair:
+		return r == tc.r
+	}
+
+	return false
+}
+
+func (r Revision) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return r
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(r)
+	case Revision:
+		if r == tc {
+			return r
+		}
+	case versionPair:
+		if r == tc.r {
+			return r
+		}
+	}
+
+	return none
+}
+
+type branchVersion string
+
+func (v branchVersion) String() string {
+	return string(v)
+}
+
+func (r branchVersion) Type() string {
+	return "branch"
+}
+
+func (v branchVersion) Matches(v2 Version) bool {
+	switch tv := v2.(type) {
+	case versionTypeUnion:
+		return tv.Matches(v)
+	case branchVersion:
+		return v == tv
+	case versionPair:
+		if tv2, ok := tv.v.(branchVersion); ok {
+			return tv2 == v
+		}
+	}
+	return false
+}
+
+func (v branchVersion) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(v)
+	case branchVersion:
+		return v == tc
+	case versionPair:
+		if tc2, ok := tc.v.(branchVersion); ok {
+			return tc2 == v
+		}
+	}
+
+	return false
+}
+
+func (v branchVersion) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case branchVersion:
+		if v == tc {
+			return v
+		}
+	case versionPair:
+		if tc2, ok := tc.v.(branchVersion); ok {
+			if v == tc2 {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+func (v branchVersion) Is(r Revision) PairedVersion {
+	return versionPair{
+		v: v,
+		r: r,
+	}
+}
+
+type plainVersion string
+
+func (v plainVersion) String() string {
+	return string(v)
+}
+
+func (r plainVersion) Type() string {
+	return "version"
+}
+
+func (v plainVersion) Matches(v2 Version) bool {
+	switch tv := v2.(type) {
+	case versionTypeUnion:
+		return tv.Matches(v)
+	case plainVersion:
+		return v == tv
+	case versionPair:
+		if tv2, ok := tv.v.(plainVersion); ok {
+			return tv2 == v
+		}
+	}
+	return false
+}
+
+func (v plainVersion) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(v)
+	case plainVersion:
+		return v == tc
+	case versionPair:
+		if tc2, ok := tc.v.(plainVersion); ok {
+			return tc2 == v
+		}
+	}
+
+	return false
+}
+
+func (v plainVersion) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case plainVersion:
+		if v == tc {
+			return v
+		}
+	case versionPair:
+		if tc2, ok := tc.v.(plainVersion); ok {
+			if v == tc2 {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+func (v plainVersion) Is(r Revision) PairedVersion {
+	return versionPair{
+		v: v,
+		r: r,
+	}
+}
+
+type semVersion struct {
+	sv *semver.Version
+}
+
+func (v semVersion) String() string {
+	return v.sv.Original()
+}
+
+func (r semVersion) Type() string {
+	return "semver"
+}
+
+func (v semVersion) Matches(v2 Version) bool {
+	switch tv := v2.(type) {
+	case versionTypeUnion:
+		return tv.Matches(v)
+	case semVersion:
+		return v.sv.Equal(tv.sv)
+	case versionPair:
+		if tv2, ok := tv.v.(semVersion); ok {
+			return tv2.sv.Equal(v.sv)
+		}
+	}
+	return false
+}
+
+func (v semVersion) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(v)
+	case semVersion:
+		return v.sv.Equal(tc.sv)
+	case semverConstraint:
+		return tc.Intersect(v) != none
+	case versionPair:
+		if tc2, ok := tc.v.(semVersion); ok {
+			return tc2.sv.Equal(v.sv)
+		}
+	}
+
+	return false
+}
+
+func (v semVersion) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case semVersion:
+		if v.sv.Equal(tc.sv) {
+			return v
+		}
+	case semverConstraint:
+		return tc.Intersect(v)
+	case versionPair:
+		if tc2, ok := tc.v.(semVersion); ok {
+			if v.sv.Equal(tc2.sv) {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+func (v semVersion) Is(r Revision) PairedVersion {
+	return versionPair{
+		v: v,
+		r: r,
+	}
+}
+
+type versionPair struct {
+	v UnpairedVersion
+	r Revision
+}
+
+func (v versionPair) String() string {
+	return v.v.String()
+}
+
+func (v versionPair) Type() string {
+	return v.v.Type()
+}
+
+func (v versionPair) Underlying() Revision {
+	return v.r
+}
+
+func (v versionPair) Matches(v2 Version) bool {
+	switch tv2 := v2.(type) {
+	case versionTypeUnion:
+		return tv2.Matches(v)
+	case versionPair:
+		return v.r == tv2.r
+	case Revision:
+		return v.r == tv2
+	}
+
+	switch tv := v.v.(type) {
+	case plainVersion:
+		if tv.Matches(v2) {
+			return true
+		}
+	case branchVersion:
+		if tv.Matches(v2) {
+			return true
+		}
+	case semVersion:
+		if tv2, ok := v2.(semVersion); ok {
+			if tv.sv.Equal(tv2.sv) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func (v versionPair) MatchesAny(c2 Constraint) bool {
+	return c2.Matches(v)
+}
+
+func (v versionPair) Intersect(c2 Constraint) Constraint {
+	switch tc := c2.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case versionPair:
+		if v.r == tc.r {
+			return v.r
+		}
+	case Revision:
+		if v.r == tc {
+			return v.r
+		}
+	case semverConstraint:
+		if tv, ok := v.v.(semVersion); ok {
+			if tc.Intersect(tv) == v.v {
+				return v
+			}
+		}
+		// If the semver intersection failed, we know nothing could work
+		return none
+	}
+
+	switch tv := v.v.(type) {
+	case plainVersion, branchVersion:
+		if c2.Matches(v) {
+			return v
+		}
+	case semVersion:
+		if tv2, ok := c2.(semVersion); ok {
+			if tv.sv.Equal(tv2.sv) {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+// compareVersionType is a sort func helper that makes a coarse-grained sorting
+// decision based on version type.
+//
+// Make sure that l and r have already been converted from versionPair (if
+// applicable).
+func compareVersionType(l, r Version) int {
+	// Big fugly double type switch. No reflect, because this can be smack in a hot loop
+	switch l.(type) {
+	case Revision:
+		switch r.(type) {
+		case Revision:
+			return 0
+		case branchVersion, plainVersion, semVersion:
+			return 1
+		default:
+			panic("unknown version type")
+		}
+	case branchVersion:
+		switch r.(type) {
+		case Revision:
+			return -1
+		case branchVersion:
+			return 0
+		case plainVersion, semVersion:
+			return 1
+		default:
+			panic("unknown version type")
+		}
+
+	case plainVersion:
+		switch r.(type) {
+		case Revision, branchVersion:
+			return -1
+		case plainVersion:
+			return 0
+		case semVersion:
+			return 1
+		default:
+			panic("unknown version type")
+		}
+
+	case semVersion:
+		switch r.(type) {
+		case Revision, branchVersion, plainVersion:
+			return -1
+		case semVersion:
+			return 0
+		default:
+			panic("unknown version type")
+		}
+	default:
+		panic("unknown version type")
+	}
+}
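+
+// For example, compareVersionType(NewVersion("v1.0.0"), Revision("f00"))
+// returns -1: in this coarse ordering, semver versions sort ahead of plain
+// versions, which sort ahead of branches, which sort ahead of revisions.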
diff --git a/vendor/github.com/sdboyer/gps/version_queue.go b/vendor/github.com/sdboyer/gps/version_queue.go
new file mode 100644
index 0000000..e74a1da
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/version_queue.go
@@ -0,0 +1,142 @@
+package gps
+
+import (
+	"fmt"
+	"strings"
+)
+
+type failedVersion struct {
+	v Version
+	f error
+}
+
+type versionQueue struct {
+	id           ProjectIdentifier
+	pi           []Version
+	lockv, prefv Version
+	fails        []failedVersion
+	b            sourceBridge
+	failed       bool
+	allLoaded    bool
+}
+
+func newVersionQueue(id ProjectIdentifier, lockv, prefv Version, b sourceBridge) (*versionQueue, error) {
+	vq := &versionQueue{
+		id: id,
+		b:  b,
+	}
+
+	// Lock goes in first, if present
+	if lockv != nil {
+		vq.lockv = lockv
+		vq.pi = append(vq.pi, lockv)
+	}
+
+	// Preferred version next
+	if prefv != nil {
+		vq.prefv = prefv
+		vq.pi = append(vq.pi, prefv)
+	}
+
+	if len(vq.pi) == 0 {
+		var err error
+		vq.pi, err = vq.b.listVersions(vq.id)
+		if err != nil {
+			// TODO(sdboyer) pushing this error this early entails that we
+			// unconditionally deep scan (e.g. vendor), as well as hitting the
+			// network.
+			return nil, err
+		}
+		vq.allLoaded = true
+	}
+
+	return vq, nil
+}
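+
+// Note on ordering: a queue built with both a lockv and a prefv tries the
+// locked version first, then the preferred one; only once those are exhausted
+// does advance() fall back to the full listVersions() result.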
+
+func (vq *versionQueue) current() Version {
+	if len(vq.pi) > 0 {
+		return vq.pi[0]
+	}
+
+	return nil
+}
+
+// advance moves the versionQueue forward to the next available version,
+// recording the failure that eliminated the current version.
+func (vq *versionQueue) advance(fail error) (err error) {
+	// Nothing in the queue means...nothing in the queue, nicely enough
+	if len(vq.pi) == 0 {
+		return
+	}
+
+	// Record the fail reason and pop the queue
+	vq.fails = append(vq.fails, failedVersion{
+		v: vq.pi[0],
+		f: fail,
+	})
+	vq.pi = vq.pi[1:]
+
+	// *now*, if the queue is empty, ensure all versions have been loaded
+	if len(vq.pi) == 0 {
+		if vq.allLoaded {
+			// This branch gets hit when the queue is first fully exhausted,
+			// after having been populated by ListVersions() on a previous
+			// advance()
+			return
+		}
+
+		vq.allLoaded = true
+		vq.pi, err = vq.b.listVersions(vq.id)
+		if err != nil {
+			return err
+		}
+
+		// search for and remove locked and pref versions
+		//
+		// could use the version comparator for binary search here to avoid
+		// O(n) each time...if it matters
+		for k, pi := range vq.pi {
+			if pi == vq.lockv || pi == vq.prefv {
+				// GC-safe deletion for slice w/pointer elements
+				vq.pi, vq.pi[len(vq.pi)-1] = append(vq.pi[:k], vq.pi[k+1:]...), nil
+				//vq.pi = append(vq.pi[:k], vq.pi[k+1:]...)
+			}
+		}
+
+		if len(vq.pi) == 0 {
+			// If listing versions added nothing (new), then return now
+			return
+		}
+	}
+
+	// We're finally sure that there's something in the queue. Remove the
+	// failure marker, as the current version may have failed, but the next one
+	// hasn't yet
+	vq.failed = false
+
+	// If all have been loaded and the queue is empty, we're definitely out
+	// of things to try. Return empty, though, because vq semantics dictate
+	// that we don't explicitly indicate the end of the queue here.
+	return
+}
+
+// isExhausted indicates whether or not the queue has definitely been exhausted,
+// in which case it will return true.
+//
+// It may return false negatives - suggesting that there is more in the queue
+// when a subsequent call to current() will be empty. Plan accordingly.
+func (vq *versionQueue) isExhausted() bool {
+	if !vq.allLoaded {
+		return false
+	}
+	return len(vq.pi) == 0
+}
+
+func (vq *versionQueue) String() string {
+	var vs []string
+
+	for _, v := range vq.pi {
+		vs = append(vs, v.String())
+	}
+	return fmt.Sprintf("[%s]", strings.Join(vs, ", "))
+}
diff --git a/vendor/github.com/sdboyer/gps/version_test.go b/vendor/github.com/sdboyer/gps/version_test.go
new file mode 100644
index 0000000..f8b9b89
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/version_test.go
@@ -0,0 +1,103 @@
+package gps
+
+import (
+	"sort"
+	"testing"
+)
+
+func TestVersionSorts(t *testing.T) {
+	rev := Revision("flooboofoobooo")
+	v1 := NewBranch("master").Is(rev)
+	v2 := NewBranch("test").Is(rev)
+	v3 := NewVersion("1.0.0").Is(rev)
+	v4 := NewVersion("1.0.1")
+	v5 := NewVersion("v2.0.5")
+	v6 := NewVersion("2.0.5.2")
+	v7 := NewBranch("unwrapped")
+	v8 := NewVersion("20.0.5.2")
+
+	start := []Version{
+		v1,
+		v2,
+		v3,
+		v4,
+		v5,
+		v6,
+		v7,
+		v8,
+		rev,
+	}
+
+	down := make([]Version, len(start))
+	copy(down, start)
+	up := make([]Version, len(start))
+	copy(up, start)
+
+	edown := []Version{
+		v3, v4, v5, // semvers
+		v6, v8, // plain versions
+		v1, v2, v7, // floating/branches
+		rev, // revs
+	}
+
+	eup := []Version{
+		v5, v4, v3, // semvers
+		v6, v8, // plain versions
+		v1, v2, v7, // floating/branches
+		rev, // revs
+	}
+
+	sort.Sort(upgradeVersionSorter(up))
+	var wrong []int
+	for k, v := range up {
+		if eup[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on upgrade sort, but got %s", eup[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Upgrade sort positions with wrong versions: %v", wrong)
+	}
+
+	sort.Sort(downgradeVersionSorter(down))
+	wrong = wrong[:0]
+	for k, v := range down {
+		if edown[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on downgrade sort, but got %s", edown[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Downgrade sort positions with wrong versions: %v", wrong)
+	}
+
+	// Now make sure we sort back the other way correctly...just because
+	sort.Sort(upgradeVersionSorter(down))
+	wrong = wrong[:0]
+	for k, v := range down {
+		if eup[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on down-then-upgrade sort, but got %s", eup[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Down-then-upgrade sort positions with wrong versions: %v", wrong)
+	}
+
+	// Now make sure we sort back the other way correctly...just because
+	sort.Sort(downgradeVersionSorter(up))
+	wrong = wrong[:0]
+	for k, v := range up {
+		if edown[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on up-then-downgrade sort, but got %s", edown[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Up-then-downgrade sort positions with wrong versions: %v", wrong)
+	}
+}
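
The expected slices above encode the precedence rule: semantic versions sort
ahead of plain string versions, which sort ahead of branches, and raw
revisions always come last; within the semver group, upgrade sorting puts the
highest release first and downgrade sorting the lowest. A minimal sketch of
that rule follows; it relies on the unexported upgradeVersionSorter exercised
above, so it would have to live in package gps, and it is illustrative rather
than part of the test suite.

package gps

import (
	"fmt"
	"sort"
)

// ExampleUpgradeOrder sketches the upgrade precedence: highest semver first,
// then the branch, then the bare revision.
func ExampleUpgradeOrder() {
	vs := []Version{
		NewBranch("master"),
		NewVersion("1.0.0"),
		Revision("abc123"),
		NewVersion("2.0.0"),
	}
	sort.Sort(upgradeVersionSorter(vs))
	for _, v := range vs {
		fmt.Println(v)
	}
	// Expected order: 2.0.0, 1.0.0, master, abc123
}
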
diff --git a/vendor/github.com/termie/go-shutil/.gitignore b/vendor/github.com/termie/go-shutil/.gitignore
new file mode 100644
index 0000000..139b1ee
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/.gitignore
@@ -0,0 +1 @@
+test/testfile3
diff --git a/vendor/github.com/termie/go-shutil/LICENSE b/vendor/github.com/termie/go-shutil/LICENSE
new file mode 100644
index 0000000..3890b94
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/LICENSE
@@ -0,0 +1 @@
+I guess Python's? If that doesn't apply then MIT. Have fun.
diff --git a/vendor/github.com/termie/go-shutil/README.rst b/vendor/github.com/termie/go-shutil/README.rst
new file mode 100644
index 0000000..b63b016
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/README.rst
@@ -0,0 +1,24 @@
+=========================================
+High-level Filesystem Operations (for Go)
+=========================================
+
+
+A direct port of a few of the functions from Python's shutil package for
+high-level filesystem operations.
+
+This project pretty much only exists so that other people don't have to keep
+re-writing this code in their projects; at the time of writing we had been
+unable to find any helpful packages for this in the stdlib or elsewhere.
+
+We don't expect it to be perfect, just better than whatever your first draft
+would have been. Patches welcome.
+
+See also: https://docs.python.org/3.5/library/shutil.html
+
+================
+Functions So Far
+================
+
+We support Copy, CopyFile, CopyMode, and CopyTree. CopyStat would be nice if
+anybody wants to write it, as would ports of the other potentially useful
+functions from the Python library :D
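
The README shows no code, so here is a minimal usage sketch based on the API
vendored below; the file paths are placeholders.

package main

import (
	"log"

	shutil "github.com/termie/go-shutil"
)

func main() {
	// Copy a single file (data + mode bits), not following symlinks.
	if _, err := shutil.Copy("/tmp/src.txt", "/tmp/dst.txt", false); err != nil {
		log.Fatal(err)
	}

	// Recursively copy a directory tree; the destination must not exist yet.
	if err := shutil.CopyTree("/tmp/srcdir", "/tmp/dstdir", nil); err != nil {
		log.Fatal(err)
	}
}
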
diff --git a/vendor/github.com/termie/go-shutil/shutil.go b/vendor/github.com/termie/go-shutil/shutil.go
new file mode 100644
index 0000000..09fcd38
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/shutil.go
@@ -0,0 +1,326 @@
+package shutil
+
+import (
+  "fmt"
+  "io"
+  "io/ioutil"
+  "os"
+  "path/filepath"
+)
+
+
+type SameFileError struct {
+  Src string
+  Dst string
+}
+
+func (e SameFileError) Error() string {
+  return fmt.Sprintf("%s and %s are the same file", e.Src, e.Dst)
+}
+
+type SpecialFileError struct {
+  File string
+  FileInfo os.FileInfo
+}
+
+func (e SpecialFileError) Error() string {
+  return fmt.Sprintf("`%s` is a named pipe", e.File)
+}
+
+type NotADirectoryError struct {
+  Src string
+}
+
+func (e NotADirectoryError) Error() string {
+  return fmt.Sprintf("`%s` is not a directory", e.Src)
+}
+
+
+type AlreadyExistsError struct {
+  Dst string
+}
+
+func (e AlreadyExistsError) Error() string {
+  return fmt.Sprintf("`%s` already exists", e.Dst)
+}
+
+
+func samefile(src string, dst string) bool {
+  srcInfo, _ := os.Stat(src)
+  dstInfo, _ := os.Stat(dst)
+  return os.SameFile(srcInfo, dstInfo)
+}
+
+func specialfile(fi os.FileInfo) bool {
+  return (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe
+}
+
+func stringInSlice(a string, list []string) bool {
+  for _, b := range list {
+    if b == a {
+      return true
+    }
+  }
+  return false
+}
+
+func IsSymlink(fi os.FileInfo) bool {
+  return (fi.Mode() & os.ModeSymlink) == os.ModeSymlink
+}
+
+
+// Copy data from src to dst
+//
+// If followSymlinks is not set and src is a symbolic link, a
+// new symlink will be created instead of copying the file it points
+// to.
+func CopyFile(src, dst string, followSymlinks bool) error {
+  if samefile(src, dst) {
+    return &SameFileError{src, dst}
+  }
+
+  // Make sure src exists and neither are special files
+  srcStat, err := os.Lstat(src)
+  if err != nil {
+    return err
+  }
+  if specialfile(srcStat) {
+    return &SpecialFileError{src, srcStat}
+  }
+
+  dstStat, err := os.Stat(dst)
+  if err != nil && !os.IsNotExist(err) {
+    return err
+  } else if err == nil {
+    if specialfile(dstStat) {
+      return &SpecialFileError{dst, dstStat}
+    }
+  }
+
+  // If we don't follow symlinks and it's a symlink, just link it and be done
+  if !followSymlinks && IsSymlink(srcStat) {
+    return os.Symlink(src, dst)
+  }
+
+  // If we are a symlink, follow it
+  if IsSymlink(srcStat) {
+    src, err = os.Readlink(src)
+    if err != nil {
+      return err
+    }
+    srcStat, err = os.Stat(src)
+    if err != nil {
+      return err
+    }
+  }
+
+  // Do the actual copy
+  fsrc, err := os.Open(src)
+  if err != nil {
+    return err
+  }
+  defer fsrc.Close()
+
+  fdst, err := os.Create(dst)
+  if err != nil {
+    return err
+  }
+  defer fdst.Close()
+
+  size, err := io.Copy(fdst, fsrc)
+  if err != nil {
+    return err
+  }
+
+  if size != srcStat.Size() {
+    return fmt.Errorf("%s: %d/%d copied", src, size, srcStat.Size())
+  }
+
+  return nil
+}
+
+
+// Copy mode bits from src to dst.
+//
+// If followSymlinks is false, symlinks aren't followed if and only
+// if both `src` and `dst` are symlinks. Go does not expose `lchmod`, so
+// if both are symlinks this does nothing.
+func CopyMode(src, dst string, followSymlinks bool) error {
+  srcStat, err := os.Lstat(src)
+  if err != nil {
+    return err
+  }
+
+  dstStat, err := os.Lstat(dst)
+  if err != nil {
+    return err
+  }
+
+  // They are both symlinks and we can't change mode on symlinks.
+  if !followSymlinks && IsSymlink(srcStat) && IsSymlink(dstStat) {
+    return nil
+  }
+
+  // At least one is not a symlink, so get the actual file stats
+  srcStat, _ = os.Stat(src)
+  err = os.Chmod(dst, srcStat.Mode())
+  return err
+}
+
+
+// Copy data and mode bits ("cp src dst"). Return the file's destination.
+//
+// The destination may be a directory.
+//
+// If followSymlinks is false, symlinks won't be followed. This
+// resembles GNU's "cp -P src dst".
+//
+// If source and destination are the same file, a SameFileError will be
+// raised.
+func Copy(src, dst string, followSymlinks bool) (string, error) {
+  dstInfo, err := os.Stat(dst)
+
+  if err == nil && dstInfo.Mode().IsDir() {
+    dst = filepath.Join(dst, filepath.Base(src))
+  }
+
+  if err != nil && !os.IsNotExist(err) {
+    return dst, err
+  }
+
+  err = CopyFile(src, dst, followSymlinks)
+  if err != nil {
+    return dst, err
+  }
+
+  err = CopyMode(src, dst, followSymlinks)
+  if err != nil {
+    return dst, err
+  }
+
+  return dst, nil
+}
+
+type CopyTreeOptions struct {
+  Symlinks               bool
+  IgnoreDanglingSymlinks bool
+  CopyFunction           func(string, string, bool) (string, error)
+  Ignore                 func(string, []os.FileInfo) []string
+}
+
+// Recursively copy a directory tree.
+//
+// The destination directory must not already exist.
+//
+// If the optional Symlinks flag is true, symbolic links in the
+// source tree result in symbolic links in the destination tree; if
+// it is false, the contents of the files pointed to by symbolic
+// links are copied. If the file pointed by the symlink doesn't
+// exist, an error will be returned.
+//
+// You can set the optional IgnoreDanglingSymlinks flag to true if you
+// want to silence this error. Notice that this has no effect on
+// platforms that don't support os.Symlink.
+//
+// The optional ignore argument is a callable. If given, it
+// is called with the `src` parameter, which is the directory
+// being visited by CopyTree(), and `names` which is the list of
+// `src` contents, as returned by ioutil.ReadDir():
+//
+//   callable(src, entries) -> ignoredNames
+//
+// Since CopyTree() is called recursively, the callable will be
+// called once for each directory that is copied. It returns a
+// list of names relative to the `src` directory that should
+// not be copied.
+//
+// The optional copyFunction argument is a callable that will be used
+// to copy each file. It will be called with the source path and the
+// destination path as arguments. By default, Copy() is used, but any
+// function that supports the same signature (like Copy2() when it
+// exists) can be used.
+func CopyTree(src, dst string, options *CopyTreeOptions) error {
+  if options == nil {
+    options = &CopyTreeOptions{
+      Symlinks:               false,
+      Ignore:                 nil,
+      CopyFunction:           Copy,
+      IgnoreDanglingSymlinks: false,
+    }
+  }
+
+  srcFileInfo, err := os.Stat(src)
+  if err != nil {
+    return err
+  }
+
+  if !srcFileInfo.IsDir() {
+    return &NotADirectoryError{src}
+  }
+
+  // Stat rather than Open, so no file handle is left dangling
+  _, err = os.Stat(dst)
+  if !os.IsNotExist(err) {
+    return &AlreadyExistsError{dst}
+  }
+
+  entries, err := ioutil.ReadDir(src)
+  if err != nil {
+    return err
+  }
+
+  err = os.MkdirAll(dst, srcFileInfo.Mode())
+  if err != nil {
+    return err
+  }
+
+  ignoredNames := []string{}
+  if options.Ignore != nil {
+    ignoredNames = options.Ignore(src, entries)
+  }
+
+  for _, entry := range entries {
+    if stringInSlice(entry.Name(), ignoredNames) {
+      continue
+    }
+    srcPath := filepath.Join(src, entry.Name())
+    dstPath := filepath.Join(dst, entry.Name())
+
+    entryFileInfo, err := os.Lstat(srcPath)
+    if err != nil {
+      return err
+    }
+
+    // Deal with symlinks
+    if IsSymlink(entryFileInfo) {
+      linkTo, err := os.Readlink(srcPath)
+      if err != nil {
+        return err
+      }
+      if options.Symlinks {
+        if err := os.Symlink(linkTo, dstPath); err != nil {
+          return err
+        }
+        //CopyStat(srcPath, dstPath, false)
+      } else {
+        // ignore dangling symlink if flag is on
+        _, err = os.Stat(linkTo)
+        if os.IsNotExist(err) && options.IgnoreDanglingSymlinks {
+          continue
+        }
+        _, err = options.CopyFunction(srcPath, dstPath, false)
+        if err != nil {
+          return err
+        }
+      }
+    } else if entryFileInfo.IsDir() {
+      err = CopyTree(srcPath, dstPath, options)
+      if err != nil {
+        return err
+      }
+    } else {
+      _, err = options.CopyFunction(srcPath, dstPath, false)
+      if err != nil {
+        return err
+      }
+    }
+  }
+  return nil
+}
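
The Ignore callback documented above receives the directory being visited and
its entries, and returns the names to skip. A short sketch follows; the skip
patterns are assumed examples. Note that when a non-nil options struct is
passed, CopyFunction must be set explicitly, since defaults are only applied
when options is nil.

package main

import (
	"log"
	"os"
	"strings"

	shutil "github.com/termie/go-shutil"
)

func main() {
	opts := &shutil.CopyTreeOptions{
		CopyFunction: shutil.Copy,
		// Skip VCS metadata and editor backups in every visited directory.
		Ignore: func(src string, entries []os.FileInfo) []string {
			var ignored []string
			for _, e := range entries {
				if e.Name() == ".git" || strings.HasSuffix(e.Name(), "~") {
					ignored = append(ignored, e.Name())
				}
			}
			return ignored
		},
	}
	if err := shutil.CopyTree("project", "project-copy", opts); err != nil {
		log.Fatal(err)
	}
}
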
diff --git a/vendor/github.com/termie/go-shutil/shutil_test.go b/vendor/github.com/termie/go-shutil/shutil_test.go
new file mode 100644
index 0000000..f6ec261
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/shutil_test.go
@@ -0,0 +1,156 @@
+package shutil
+
+import (
+  "bytes"
+  "io/ioutil"
+  "os"
+  "testing"
+)
+
+
+func filesMatch(src, dst string) (bool, error) {
+  srcContents, err := ioutil.ReadFile(src)
+  if err != nil {
+    return false, err
+  }
+
+  dstContents, err := ioutil.ReadFile(dst)
+  if err != nil {
+    return false, err
+  }
+
+  if !bytes.Equal(srcContents, dstContents) {
+    return false, nil
+  }
+  return true, nil
+}
+
+
+func TestSameFileError(t *testing.T) {
+  _, err := Copy("test/testfile", "test/testfile", false)
+  _, ok := err.(*SameFileError)
+  if !ok {
+    t.Error(err)
+  }
+}
+
+
+func TestCopyFile(t *testing.T) {
+  // clear out existing files if they exist
+  os.Remove("test/testfile3")
+
+  err := CopyFile("test/testfile", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match, err := filesMatch("test/testfile", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+  if !match {
+    t.Fail()
+    return
+  }
+
+  // And again without clearing the files
+  err = CopyFile("test/testfile2", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match2, err := filesMatch("test/testfile2", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  if !match2 {
+    t.Fail()
+    return
+  }
+}
+
+
+func TestCopy(t *testing.T) {
+  // clear out existing files if they exist
+  os.Remove("test/testfile3")
+
+  _, err := Copy("test/testfile", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match, err := filesMatch("test/testfile", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+  if !match {
+    t.Fail()
+    return
+  }
+
+  // And again without clearing the files
+  _, err = Copy("test/testfile2", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match2, err := filesMatch("test/testfile2", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  if !match2 {
+    t.Fail()
+    return
+  }
+}
+
+
+func TestCopyTree(t *testing.T) {
+  // clear out existing files if they exist
+  os.RemoveAll("test/testdir3")
+
+  err := CopyTree("test/testdir", "test/testdir3", nil)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match, err := filesMatch("test/testdir/file1", "test/testdir3/file1")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+  if !match {
+    t.Fail()
+    return
+  }
+
+  // // And again without clearing the files
+  // _, err = Copy("test/testfile2", "test/testfile3", false)
+  // if err != nil {
+  //   t.Error(err)
+  //   return
+  // }
+
+  // match2, err := filesMatch("test/testfile2", "test/testfile3")
+  // if err != nil {
+  //   t.Error(err)
+  //   return
+  // }
+
+  // if !match2 {
+  //   t.Fail()
+  //   return
+  // }
+}
+
diff --git a/vendor/github.com/termie/go-shutil/test/testdir/file1 b/vendor/github.com/termie/go-shutil/test/testdir/file1
new file mode 100644
index 0000000..e212970
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testdir/file1
@@ -0,0 +1 @@
+file1
diff --git a/vendor/github.com/termie/go-shutil/test/testdir/file2 b/vendor/github.com/termie/go-shutil/test/testdir/file2
new file mode 100644
index 0000000..6c493ff
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testdir/file2
@@ -0,0 +1 @@
+file2
diff --git a/vendor/github.com/termie/go-shutil/test/testfile b/vendor/github.com/termie/go-shutil/test/testfile
new file mode 100644
index 0000000..2691857
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testfile
@@ -0,0 +1 @@
+testfile
diff --git a/vendor/github.com/termie/go-shutil/test/testfile2 b/vendor/github.com/termie/go-shutil/test/testfile2
new file mode 100644
index 0000000..7d57647
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testfile2
@@ -0,0 +1 @@
+testfile2