Merge branch 'master' into catch-master

This definitely doesn't fix everything - it won't compile right now. But
it's a good checkpoint.
diff --git a/action/config_wizard.go b/action/config_wizard.go
index ae2b317..8dcc695 100644
--- a/action/config_wizard.go
+++ b/action/config_wizard.go
@@ -13,6 +13,7 @@
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/semver"
 	"github.com/Masterminds/vcs"
+	"github.com/sdboyer/gps"
 )
 
 // ConfigWizard reads configuration from a glide.yaml file and attempts to suggest
@@ -69,7 +70,7 @@
 
 		// First check, ask if the tag should be used instead of the commit id for it.
 		cur := cache.MemCurrent(remote)
-		if cur != "" && cur != dep.Reference {
+		if cur != "" && cur != dep.Version {
 			wizardSugOnce()
 			var dres bool
 			asked, use, val := wizardOnce("current")
@@ -86,15 +87,15 @@
 			}
 
 			if dres {
-				msg.Info("Updating %s to use the tag %s instead of commit id %s", dep.Name, cur, dep.Reference)
-				dep.Reference = cur
+				msg.Info("Updating %s to use the tag %s instead of commit id %s", dep.Name, cur, dep.Version)
+				dep.Version = cur
 				changes++
 			}
 		}
 
 		// Second check, if no version is being used and there's a semver release ask about latest.
 		memlatest := cache.MemLatest(remote)
-		if dep.Reference == "" && memlatest != "" {
+		if dep.IsUnconstrained() && memlatest != "" {
 			wizardSugOnce()
 			var dres bool
 			asked, use, val := wizardOnce("latest")
@@ -112,13 +113,13 @@
 
 			if dres {
 				msg.Info("Updating %s to use the release %s instead of no release", dep.Name, memlatest)
-				dep.Reference = memlatest
+				dep.Version = memlatest
 				changes++
 			}
 		}
 
 		// Third check, if the version is semver offer to use a range instead.
-		sv, err := semver.NewVersion(dep.Reference)
+		sv, err := semver.NewVersion(dep.Version)
 		if err == nil {
 			wizardSugOnce()
 			var res string
@@ -137,13 +138,13 @@
 
 			if res == "m" {
 				r := "^" + sv.String()
-				msg.Info("Updating %s to use the range %s instead of commit id %s", dep.Name, r, dep.Reference)
-				dep.Reference = r
+				msg.Info("Updating %s to use the range %s instead of commit id %s", dep.Name, r, dep.Version)
+				dep.Version = r
 				changes++
 			} else if res == "p" {
 				r := "~" + sv.String()
-				msg.Info("Updating %s to use the range %s instead of commit id %s", dep.Name, r, dep.Reference)
-				dep.Reference = r
+				msg.Info("Updating %s to use the range %s instead of commit id %s", dep.Name, r, dep.Version)
+				dep.Version = r
 				changes++
 			}
 		}
@@ -222,7 +223,7 @@
 }
 
 func wizardAskCurrent(cur string, d *cfg.Dependency) bool {
-	msg.Info("The package %s is currently set to use the version %s.", d.Name, d.Reference)
+	msg.Info("The package %s is currently set to use the version %s.", d.Name, d.GetConstraint())
 	msg.Info("There is an equivalent semantic version (http://semver.org) release of %s. Would", cur)
 	msg.Info("you like to use that instead? Yes (Y) or No (N)")
 	return msg.PromptUntilYorN()
@@ -236,7 +237,7 @@
 }
 
 func wizardLookInto(d *cfg.Dependency) bool {
-	_, err := semver.NewConstraint(d.Reference)
+	_, err := semver.NewConstraint(d.Version)
 
 	// The existing version is already a valid semver constraint so we skip suggestions.
 	if err == nil {
@@ -286,7 +287,7 @@
 				if found := createGitParseVersion.FindString(ti); found != "" {
 					tg := strings.TrimPrefix(strings.TrimSuffix(found, "^{}"), "tags/")
 					cache.MemPut(remote, tg)
-					if d.Reference != "" && strings.HasPrefix(ti, d.Reference) {
+					if !d.IsUnconstrained() && strings.HasPrefix(ti, d.Version) {
 						cache.MemSetCurrent(remote, tg)
 					}
 				}
@@ -316,15 +317,17 @@
 				cache.MemPut(remote, v)
 			}
 		}
-		if d.Reference != "" && repo.IsReference(d.Reference) {
-			tgs, err = repo.TagsFromCommit(d.Reference)
-			if err != nil {
-				msg.Debug("Problem getting tags for commit: %s", err)
-			} else {
-				if len(tgs) > 0 {
-					for _, v := range tgs {
-						if !(repo.Vcs() == vcs.Hg && v == "tip") {
-							cache.MemSetCurrent(remote, v)
+		if !d.IsUnconstrained() {
+			if rev, ok := d.GetConstraint().(gps.Revision); ok {
+				tgs, err = repo.TagsFromCommit(string(rev))
+				if err != nil {
+					msg.Debug("Problem getting tags for commit: %s", err)
+				} else {
+					if len(tgs) > 0 {
+						for _, v := range tgs {
+							if !(repo.Vcs() == vcs.Hg && v == "tip") {
+								cache.MemSetCurrent(remote, v)
+							}
 						}
 					}
 				}
diff --git a/action/create.go b/action/create.go
index 5d42199..4abf3f6 100644
--- a/action/create.go
+++ b/action/create.go
@@ -129,32 +129,20 @@
 
 	for _, pa := range sortable {
 		n := strings.TrimPrefix(pa, vpath)
-		root, subpkg := util.NormalizeName(n)
+		root, _ := util.NormalizeName(n)
 
 		if !config.Imports.Has(root) && root != config.Name {
 			msg.Info("--> Found reference to %s\n", n)
 			d := &cfg.Dependency{
 				Name: root,
 			}
-			if len(subpkg) > 0 {
-				d.Subpackages = []string{subpkg}
-			}
 			config.Imports = append(config.Imports, d)
-		} else if config.Imports.Has(root) {
-			if len(subpkg) > 0 {
-				subpkg = strings.TrimPrefix(subpkg, "/")
-				d := config.Imports.Get(root)
-				if !d.HasSubpackage(subpkg) {
-					msg.Info("--> Adding sub-package %s to %s\n", subpkg, root)
-					d.Subpackages = append(d.Subpackages, subpkg)
-				}
-			}
 		}
 	}
 
 	for _, pa := range testSortable {
 		n := strings.TrimPrefix(pa, vpath)
-		root, subpkg := util.NormalizeName(n)
+		root, _ := util.NormalizeName(n)
 
 		if config.Imports.Has(root) && root != config.Name {
 			msg.Debug("--> Found test reference to %s already listed as an import", n)
@@ -163,19 +151,7 @@
 			d := &cfg.Dependency{
 				Name: root,
 			}
-			if len(subpkg) > 0 {
-				d.Subpackages = []string{subpkg}
-			}
 			config.DevImports = append(config.DevImports, d)
-		} else if config.DevImports.Has(root) {
-			if len(subpkg) > 0 {
-				subpkg = strings.TrimPrefix(subpkg, "/")
-				d := config.DevImports.Get(root)
-				if !d.HasSubpackage(subpkg) {
-					msg.Info("--> Adding test sub-package %s to %s\n", subpkg, root)
-					d.Subpackages = append(d.Subpackages, subpkg)
-				}
-			}
 		}
 	}
 
@@ -207,10 +183,10 @@
 	}
 
 	for _, i := range deps {
-		if i.Reference == "" {
+		if i.IsUnconstrained() {
 			msg.Info("--> Found imported reference to %s", i.Name)
 		} else {
-			msg.Info("--> Found imported reference to %s at revision %s", i.Name, i.Reference)
+			msg.Info("--> Found imported reference to %s with constraint %s", i.Name, i.GetConstraint())
 		}
 
 		config.Imports = append(config.Imports, i)
diff --git a/action/ensure.go b/action/ensure.go
index 2933c1c..f8a2969 100644
--- a/action/ensure.go
+++ b/action/ensure.go
@@ -30,7 +30,10 @@
 		msg.ExitCode(2)
 		msg.Die("Failed to load %s: %s", yamlpath, err)
 	}
-	conf, err := cfg.ConfigFromYaml(yml)
+	conf, legacy, err := cfg.ConfigFromYaml(yml)
+	if legacy {
+		msg.Warn("glide.yaml was in a legacy format. An attempt will be made to automatically update it.")
+	}
 	if err != nil {
 		msg.ExitCode(3)
 		msg.Die("Failed to parse %s: %s", yamlpath, err)
diff --git a/action/get.go b/action/get.go
index 696c50c..7d27a7a 100644
--- a/action/get.go
+++ b/action/get.go
@@ -2,99 +2,117 @@
 
 import (
 	"fmt"
+	"log"
+	"os"
 	"path/filepath"
 	"strings"
 
 	"github.com/Masterminds/glide/cache"
 	"github.com/Masterminds/glide/cfg"
-	"github.com/Masterminds/glide/godep"
+	"github.com/Masterminds/glide/dependency"
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/repo"
 	"github.com/Masterminds/glide/util"
 	"github.com/Masterminds/semver"
+	"github.com/sdboyer/gps"
 )
 
 // Get fetches one or more dependencies and installs.
 //
-// This includes resolving dependency resolution and re-generating the lock file.
-func Get(names []string, installer *repo.Installer, insecure, skipRecursive, stripVendor, nonInteract, testDeps bool) {
-	cache.SystemLock()
-
+// This includes a solver run and re-generating the lock file.
+func Get(names []string, installer *repo.Installer, stripVendor, nonInteract bool) {
 	base := gpath.Basepath()
 	EnsureGopath()
 	EnsureVendorDir()
 	conf := EnsureConfig()
+
 	glidefile, err := gpath.Glide()
 	if err != nil {
 		msg.Die("Could not find Glide file: %s", err)
 	}
 
+	vend, err := gpath.Vendor()
+	if err != nil {
+		msg.Die("Could not find the vendor dir: %s", err)
+	}
+
+	rd := filepath.Dir(glidefile)
+	rt, err := gps.ListPackages(rd, conf.Name)
+	if err != nil {
+		msg.Die("Error while scanning project: %s", err)
+	}
+
+	params := gps.SolveParameters{
+		RootDir:         rd,
+		RootPackageTree: rt,
+		Manifest:        conf,
+		Trace:           true,
+		TraceLogger:     log.New(os.Stdout, "", 0),
+	}
+
+	// We load the lock file early and bail out if there's a problem, because we
+	// don't want a get to just update all deps without the user explicitly
+	// making that choice.
+	if gpath.HasLock(base) {
+		params.Lock, _, err = loadLockfile(base, conf)
+		if err != nil {
+			msg.Err("Could not load lockfile; aborting get. Existing dependency versions cannot be safely preserved without a lock file. Error was: %s", err)
+			return
+		}
+	}
+
+	// Create the SourceManager for this run
+	sm, err := gps.NewSourceManager(dependency.Analyzer{}, filepath.Join(installer.Home, "cache"))
+	defer sm.Release()
+	if err != nil {
+		msg.Err(err.Error())
+		return
+	}
+
+	// Now, with the easy/fast errors out of the way, dive into adding the new
+	// deps to the manifest.
+
 	// Add the packages to the config.
-	if count, err2 := addPkgsToConfig(conf, names, insecure, nonInteract, testDeps); err2 != nil {
+	//if count, err2 := addPkgsToConfig(conf, names, insecure, nonInteract, testDeps); err2 != nil {
+	if count, err2 := addPkgsToConfig(conf, names, false, nonInteract, false); err2 != nil {
 		msg.Die("Failed to get new packages: %s", err2)
 	} else if count == 0 {
 		msg.Warn("Nothing to do")
 		return
 	}
 
-	// Fetch the new packages. Can't resolve versions via installer.Update if
-	// get is called while the vendor/ directory is empty so we checkout
-	// everything.
-	err = installer.Checkout(conf)
+	// Prepare a solver. This validates our params.
+	s, err := gps.Prepare(params, sm)
 	if err != nil {
-		msg.Die("Failed to checkout packages: %s", err)
+		msg.Err("Aborted get - could not set up solver to reconcile dependencies: %s", err)
+		return
 	}
 
-	// Prior to resolving dependencies we need to start working with a clone
-	// of the conf because we'll be making real changes to it.
-	confcopy := conf.Clone()
-
-	if !skipRecursive {
-		// Get all repos and update them.
-		// TODO: Can we streamline this in any way? The reason that we update all
-		// of the dependencies is that we need to re-negotiate versions. For example,
-		// if an existing dependency has the constraint >1.0 and this new package
-		// adds the constraint <2.0, then this may re-resolve the existing dependency
-		// to be between 1.0 and 2.0. But changing that dependency may then result
-		// in that dependency's dependencies changing... so we sorta do the whole
-		// thing to be safe.
-		err = installer.Update(confcopy)
-		if err != nil {
-			msg.Die("Could not update packages: %s", err)
-		}
-	}
-
-	// Set Reference
-	if err := repo.SetReference(confcopy, installer.ResolveTest); err != nil {
-		msg.Err("Failed to set references: %s", err)
-	}
-
-	err = installer.Export(confcopy)
+	r, err := s.Solve()
 	if err != nil {
-		msg.Die("Unable to export dependencies to vendor directory: %s", err)
+		// TODO better error handling
+		msg.Err("Failed to find a solution for all new dependencies: %s", err.Error())
+		return
 	}
 
-	// Write YAML
-	if err := conf.WriteFile(glidefile); err != nil {
-		msg.Die("Failed to write glide YAML file: %s", err)
-	}
-	if !skipRecursive {
-		// Write lock
-		if stripVendor {
-			confcopy = godep.RemoveGodepSubpackages(confcopy)
-		}
-		writeLock(conf, confcopy, base)
-	} else {
-		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
+	// Solve succeeded. Write out the yaml, lock, and vendor to a tmpdir, then mv
+	// them all into place iff all the writes worked
+
+	gw := safeGroupWriter{
+		conf:        conf,
+		lock:        params.Lock.(*cfg.Lockfile),
+		resultLock:  r,
+		sm:          sm,
+		glidefile:   glidefile,
+		vendor:      vend,
+		stripVendor: stripVendor,
 	}
 
-	if stripVendor {
-		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
-		err := gpath.StripVendor()
-		if err != nil {
-			msg.Err("Unable to strip vendor directories: %s", err)
-		}
+	err = gw.writeAllSafe()
+	if err != nil {
+		msg.Err(err.Error())
+		return
 	}
 }
 
@@ -122,7 +140,7 @@
 // - sets up insecure repo URLs where necessary
 // - generates a list of subpackages
 func addPkgsToConfig(conf *cfg.Config, names []string, insecure, nonInteract, testDeps bool) (int, error) {
-
+	// TODO refactor this to take and use a gps.SourceManager
 	if len(names) == 1 {
 		msg.Info("Preparing to install %d package.", len(names))
 	} else {
@@ -139,7 +157,7 @@
 
 		msg.Info("Attempting to get package %s", name)
 
-		root, subpkg := util.NormalizeName(name)
+		root, _ := util.NormalizeName(name)
 		if len(root) == 0 {
 			return 0, fmt.Errorf("Package name is required for %q.", name)
 		}
@@ -160,24 +178,7 @@
 				msg.Warn("--> Test dependency %s already listed as import", root)
 			}
 
-			// Check if the subpackage is present.
-			if subpkg != "" {
-				if dep == nil {
-					dep = conf.Imports.Get(root)
-					if dep == nil && testDeps {
-						dep = conf.DevImports.Get(root)
-					}
-				}
-				if dep.HasSubpackage(subpkg) {
-					if !moved {
-						msg.Warn("--> Package %q is already in glide.yaml. Skipping", name)
-					}
-				} else {
-					dep.Subpackages = append(dep.Subpackages, subpkg)
-					msg.Info("--> Adding sub-package %s to existing import %s", subpkg, root)
-					numAdded++
-				}
-			} else if !moved {
+			if !moved {
 				msg.Warn("--> Package %q is already in glide.yaml. Skipping", root)
 			}
 			continue
@@ -199,17 +200,15 @@
 		}
 
 		if version != "" {
-			dep.Reference = version
+			// TODO(sdboyer) set the right type...what is that here?
+			dep.Version = version
+			dep.Branch = "" // just to be sure
 		} else if !nonInteract {
 			getWizard(dep)
 		}
 
-		if len(subpkg) > 0 {
-			dep.Subpackages = []string{subpkg}
-		}
-
-		if dep.Reference != "" {
-			msg.Info("--> Adding %s to your configuration with the version %s", dep.Name, dep.Reference)
+		if !dep.IsUnconstrained() {
+			msg.Info("--> Adding %s to your configuration with the version %s", dep.Name, dep.GetConstraint())
 		} else {
 			msg.Info("--> Adding %s to your configuration", dep.Name)
 		}
@@ -235,15 +234,15 @@
 	if memlatest != "" {
 		dres := wizardAskLatest(memlatest, dep)
 		if dres {
-			dep.Reference = memlatest
+			dep.Version = memlatest
 
-			sv, err := semver.NewVersion(dep.Reference)
-			if err == nil {
+			sv, err := semver.NewVersion(memlatest)
+			if err != nil {
 				res := wizardAskRange(sv, dep)
 				if res == "m" {
-					dep.Reference = "^" + sv.String()
+					dep.Version = "^" + memlatest
 				} else if res == "p" {
-					dep.Reference = "~" + sv.String()
+					dep.Version = "~" + memlatest
 				}
 			}
 		}
diff --git a/action/get_test.go b/action/get_test.go
index 07dba0f..9e5d8f0 100644
--- a/action/get_test.go
+++ b/action/get_test.go
@@ -16,7 +16,6 @@
 	conf := new(cfg.Config)
 	dep := new(cfg.Dependency)
 	dep.Name = "github.com/Masterminds/cookoo"
-	dep.Subpackages = append(dep.Subpackages, "convert")
 	conf.Imports = append(conf.Imports, dep)
 
 	names := []string{
@@ -30,17 +29,6 @@
 		t.Error("addPkgsToConfig failed to add github.com/Masterminds/semver")
 	}
 
-	d := conf.Imports.Get("github.com/Masterminds/cookoo")
-	found := false
-	for _, s := range d.Subpackages {
-		if s == "fmt" {
-			found = true
-		}
-	}
-	if !found {
-		t.Error("addPkgsToConfig failed to add subpackage to existing import")
-	}
-
 	// Restore messaging to original location
 	msg.Default.Stderr = o
 }
diff --git a/action/install.go b/action/install.go
index 75eaeb2..0ce5797 100644
--- a/action/install.go
+++ b/action/install.go
@@ -1,67 +1,368 @@
 package action
 
 import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
 	"path/filepath"
 
-	"github.com/Masterminds/glide/cache"
 	"github.com/Masterminds/glide/cfg"
+	"github.com/Masterminds/glide/dependency"
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/repo"
+	"github.com/sdboyer/gps"
 )
 
 // Install installs a vendor directory based on an existing Glide configuration.
-func Install(installer *repo.Installer, stripVendor bool) {
-	cache.SystemLock()
-
+func Install(installer *repo.Installer, io, so, sv bool) {
 	base := "."
 	// Ensure GOPATH
 	EnsureGopath()
 	EnsureVendorDir()
 	conf := EnsureConfig()
 
-	// Lockfile exists
-	if !gpath.HasLock(base) {
-		msg.Info("Lock file (glide.lock) does not exist. Performing update.")
-		Update(installer, false, stripVendor)
+	// TODO might need a better way for discovering the root
+	vend, err := gpath.Vendor()
+	if err != nil {
+		msg.Die("Could not find the vendor dir: %s", err)
+	}
+
+	// Create the SourceManager for this run
+	sm, err := gps.NewSourceManager(dependency.Analyzer{}, filepath.Join(installer.Home, "cache"))
+	defer sm.Release()
+	if err != nil {
+		msg.Err(err.Error())
 		return
 	}
-	// Load lockfile
-	lock, err := cfg.ReadLockFile(filepath.Join(base, gpath.LockFile))
+
+	rd := filepath.Dir(vend)
+	rt, err := gps.ListPackages(rd, conf.Name)
 	if err != nil {
-		msg.Die("Could not load lockfile.")
-	}
-	// Verify lockfile hasn't changed
-	hash, err := conf.Hash()
-	if err != nil {
-		msg.Die("Could not load lockfile.")
-	} else if hash != lock.Hash {
-		msg.Warn("Lock file may be out of date. Hash check of YAML failed. You may need to run 'update'")
+		msg.Die("Error while scanning project: %s", err)
 	}
 
-	// Install
-	newConf, err := installer.Install(lock, conf)
-	if err != nil {
-		msg.Die("Failed to install: %s", err)
+	params := gps.SolveParameters{
+		RootDir:         rd,
+		RootPackageTree: rt,
+		Manifest:        conf,
+		Trace:           true,
+		TraceLogger:     log.New(os.Stdout, "", 0),
 	}
 
-	msg.Info("Setting references.")
-
-	// Set reference
-	if err := repo.SetReference(newConf, installer.ResolveTest); err != nil {
-		msg.Die("Failed to set references: %s (Skip to cleanup)", err)
-	}
-
-	err = installer.Export(newConf)
-	if err != nil {
-		msg.Die("Unable to export dependencies to vendor directory: %s", err)
-	}
-
-	if stripVendor {
-		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
-		err := gpath.StripVendor()
+	var s gps.Solver
+	if gpath.HasLock(base) {
+		var legacy bool
+		params.Lock, legacy, err = loadLockfile(base, conf)
+		if legacy {
+			msg.Warn("glide.lock was in a legacy format. An attempt will be made to automatically update it.")
+		}
 		if err != nil {
-			msg.Err("Unable to strip vendor directories: %s", err)
+			msg.Err("Could not load lockfile.")
+			return
+		}
+
+		s, err = gps.Prepare(params, sm)
+		if err != nil {
+			msg.Err("Could not set up solver: %s", err)
+			return
+		}
+		digest := s.HashInputs()
+
+		// Check if digests match, and warn if they don't
+		if bytes.Equal(digest, params.Lock.InputHash()) {
+			if so {
+				msg.Err("glide.yaml is out of sync with glide.lock")
+				return
+			} else {
+				msg.Warn("glide.yaml is out of sync with glide.lock!")
+			}
+		}
+
+		gw := safeGroupWriter{
+			resultLock:  params.Lock,
+			vendor:      vend,
+			sm:          sm,
+			stripVendor: sv,
+		}
+
+		err = gw.writeAllSafe()
+		if err != nil {
+			msg.Err(err.Error())
+			return
+		}
+	} else if io || so {
+		msg.Err("No glide.lock file could be found.")
+		return
+	} else {
+		// There is no lock, so we have to solve first
+		s, err = gps.Prepare(params, sm)
+		if err != nil {
+			msg.Err("Could not set up solver: %s", err)
+			return
+		}
+
+		r, err := s.Solve()
+		if err != nil {
+			// TODO better error handling
+			msg.Err(err.Error())
+			return
+		}
+
+		gw := safeGroupWriter{
+			resultLock:  r,
+			vendor:      vend,
+			sm:          sm,
+			stripVendor: sv,
+		}
+
+		err = gw.writeAllSafe()
+		if err != nil {
+			msg.Err(err.Error())
+			return
 		}
 	}
 }
+
+// locksAreEquivalent compares the fingerprints between two locks to determine
+// if they're equivalent.
+//
+// If either of the locks is nil, the input hashes are different, the
+// fingerprints are different, or any error is returned from fingerprinting,
+// this function returns false.
+func locksAreEquivalent(l1, l2 *cfg.Lockfile) bool {
+	if l1 != nil && l2 != nil {
+		if l1.Hash != l2.Hash {
+			return false
+		}
+
+		f1, err := l1.Fingerprint()
+		f2, err2 := l2.Fingerprint()
+		if err == nil && err2 == nil && f1 == f2 {
+			return true
+		}
+	}
+	return false
+}
+
+// safeGroupWriter provides a slipshod-but-better-than-nothing approach to
+// grouping together yaml, lock, and vendor dir writes.
+type safeGroupWriter struct {
+	conf              *cfg.Config
+	lock              *cfg.Lockfile
+	resultLock        gps.Lock
+	sm                gps.SourceManager
+	glidefile, vendor string
+	stripVendor       bool
+}
+
+// writeAllSafe writes out some combination of config yaml, lock, and a vendor
+// tree, to a temp dir, then moves them into place if and only if all the write
+// operations succeeded. It also does its best to roll back if any moves fail.
+//
+// This helps to ensure glide doesn't exit with a partial write, resulting in an
+// undefined disk state.
+//
+// - If a gw.conf is provided, it will be written to gw.glidefile
+// - If gw.lock is provided without a gw.resultLock, it will be written to
+//   `glide.lock` in the parent dir of gw.vendor
+// - If gw.lock and gw.resultLock are both provided and are not equivalent,
+//   the resultLock will be written to the same location as above, and a vendor
+//   tree will be written to gw.vendor
+// - If gw.resultLock is provided and gw.lock is not, it will write both a lock
+//   and vendor dir in the same way
+//
+// Any of the conf, lock, or result can be omitted; the grouped write operation
+// will continue for whichever inputs are present.
+func (gw safeGroupWriter) writeAllSafe() error {
+	// Decide which writes we need to do
+	var writeConf, writeLock, writeVendor bool
+
+	if gw.conf != nil {
+		writeConf = true
+	}
+
+	if gw.resultLock != nil {
+		if gw.lock == nil {
+			writeLock, writeVendor = true, true
+		} else {
+			rlf, err := cfg.LockfileFromSolverLock(gw.resultLock)
+			// This err really shouldn't occur, but could if we get an unpaired
+			// version back from gps somehow
+			if err != nil {
+				return err
+			}
+			if !locksAreEquivalent(rlf, gw.lock) {
+				writeLock, writeVendor = true, true
+			}
+		}
+	} else if gw.lock != nil {
+		writeLock = true
+	}
+
+	if !writeConf && !writeLock && !writeVendor {
+		// nothing to do
+		return nil
+	}
+
+	if writeConf && gw.glidefile == "" {
+		return fmt.Errorf("Must provide a path if writing out a config yaml.")
+	}
+
+	if (writeLock || writeVendor) && gw.vendor == "" {
+		return fmt.Errorf("Must provide a vendor dir if writing out a lock or vendor dir.")
+	}
+
+	if writeVendor && gw.sm == nil {
+		return fmt.Errorf("Must provide a SourceManager if writing out a vendor dir.")
+	}
+
+	td, err := ioutil.TempDir(os.TempDir(), "glide")
+	if err != nil {
+		return fmt.Errorf("Error while creating temp dir for vendor directory: %s", err)
+	}
+	defer os.RemoveAll(td)
+
+	if writeConf {
+		if err := gw.conf.WriteFile(filepath.Join(td, "glide.yaml")); err != nil {
+			return fmt.Errorf("Failed to write glide YAML file: %s", err)
+		}
+	}
+
+	if writeLock {
+		if gw.resultLock == nil {
+			// the result lock is nil but the flag is on, so we must be writing
+			// the other one
+			if err := gw.lock.WriteFile(filepath.Join(td, gpath.LockFile)); err != nil {
+				return fmt.Errorf("Failed to write glide lock file: %s", err)
+			}
+		} else {
+			rlf, err := cfg.LockfileFromSolverLock(gw.resultLock)
+			// As with above, this case really shouldn't get hit unless there's
+			// a bug in gps, or guarantees change
+			if err != nil {
+				return err
+			}
+			if err := rlf.WriteFile(filepath.Join(td, gpath.LockFile)); err != nil {
+				return fmt.Errorf("Failed to write glide lock file: %s", err)
+			}
+		}
+	}
+
+	if writeVendor {
+		err = gps.WriteDepTree(filepath.Join(td, "vendor"), gw.resultLock, gw.sm, gw.stripVendor)
+		if err != nil {
+			return fmt.Errorf("Error while generating vendor tree: %s", err)
+		}
+	}
+
+	// Move the existing files and dirs to the temp dir while we put the new
+	// ones in, to provide insurance against errors for as long as possible
+	var fail bool
+	var failerr error
+	type pathpair struct {
+		from, to string
+	}
+	var restore []pathpair
+
+	if writeConf {
+		if _, err := os.Stat(gw.glidefile); err == nil {
+			// move out the old one
+			tmploc := filepath.Join(td, "glide.yaml-old")
+			failerr = os.Rename(gw.glidefile, tmploc)
+			if failerr != nil {
+				fail = true
+			} else {
+				restore = append(restore, pathpair{from: tmploc, to: gw.glidefile})
+			}
+		}
+
+		// move in the new one
+		failerr = os.Rename(filepath.Join(td, "glide.yaml"), gw.glidefile)
+		if failerr != nil {
+			fail = true
+		}
+	}
+
+	if !fail && writeLock {
+		tgt := filepath.Join(filepath.Dir(gw.vendor), gpath.LockFile)
+		if _, err := os.Stat(tgt); err == nil {
+			// move out the old one
+			tmploc := filepath.Join(td, "glide.lock-old")
+
+			failerr = os.Rename(tgt, tmploc)
+			if failerr != nil {
+				fail = true
+			} else {
+				restore = append(restore, pathpair{from: tmploc, to: tgt})
+			}
+		}
+
+		// move in the new one
+		failerr = os.Rename(filepath.Join(td, gpath.LockFile), tgt)
+		if failerr != nil {
+			fail = true
+		}
+	}
+
+	// have to declare out here so it's present later
+	var vendorbak string
+	if !fail && writeVendor {
+		if _, err := os.Stat(gw.vendor); err == nil {
+			// move out the old vendor dir. just do it into an adjacent dir, in
+			// order to mitigate the possibility of a pointless cross-filesystem move
+			vendorbak = gw.vendor + "-old"
+			if _, err := os.Stat(vendorbak); err == nil {
+				// Just in case that happens to exist...
+				vendorbak = filepath.Join(td, "vendor-old")
+			}
+			failerr = os.Rename(gw.vendor, vendorbak)
+			if failerr != nil {
+				fail = true
+			} else {
+				restore = append(restore, pathpair{from: vendorbak, to: gw.vendor})
+			}
+		}
+
+		// move in the new one
+		failerr = os.Rename(filepath.Join(td, "vendor"), gw.vendor)
+		if failerr != nil {
+			fail = true
+		}
+	}
+
+	// If we failed at any point, move all the things back into place, then bail
+	if fail {
+		for _, pair := range restore {
+			// Nothing we can do on err here, we're already in recovery mode
+			os.Rename(pair.from, pair.to)
+		}
+		return failerr
+	}
+
+	// Renames all went smoothly. The deferred os.RemoveAll will get the temp
+	// dir, but if we wrote vendor, we have to clean that up directly
+
+	if writeVendor {
+		// Again, kinda nothing we can do about an error at this point
+		os.RemoveAll(vendorbak)
+	}
+
+	return nil
+}
+
+// loadLockfile loads the contents of a glide.lock file.
+func loadLockfile(base string, conf *cfg.Config) (*cfg.Lockfile, bool, error) {
+	yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
+	if err != nil {
+		return nil, false, err
+	}
+	lock, legacy, err := cfg.LockfileFromYaml(yml)
+	if err != nil {
+		return nil, false, err
+	}
+
+	return lock, legacy, nil
+}
diff --git a/action/rebuild.go b/action/rebuild.go
index dffde39..6e76625 100644
--- a/action/rebuild.go
+++ b/action/rebuild.go
@@ -41,22 +41,23 @@
 }
 
 func buildDep(dep *cfg.Dependency, vpath string) error {
-	if len(dep.Subpackages) == 0 {
-		buildPath(dep.Name)
-	}
+	buildPath(dep.Name)
 
-	for _, pkg := range dep.Subpackages {
-		if pkg == "**" || pkg == "..." {
-			//Info("Building all packages in %s\n", dep.Name)
-			buildPath(path.Join(dep.Name, "..."))
-		} else {
-			paths, err := resolvePackages(vpath, dep.Name, pkg)
-			if err != nil {
-				msg.Warn("Error resolving packages: %s", err)
-			}
-			buildPaths(paths)
-		}
-	}
+	// TODO(sdboyer) to replace this, would need static analysis. But...rebuild
+	// is going away anyway, right?
+
+	//for _, pkg := range dep.Subpackages {
+	//if pkg == "**" || pkg == "..." {
+	////Info("Building all packages in %s\n", dep.Name)
+	//buildPath(path.Join(dep.Name, "..."))
+	//} else {
+	//paths, err := resolvePackages(vpath, dep.Name, pkg)
+	//if err != nil {
+	//msg.Warn("Error resolving packages: %s", err)
+	//}
+	//buildPaths(paths)
+	//}
+	//}
 
 	return nil
 }
diff --git a/action/update.go b/action/update.go
index 19ede20..324b5a3 100644
--- a/action/update.go
+++ b/action/update.go
@@ -1,113 +1,102 @@
 package action
 
 import (
-	"io/ioutil"
+	"log"
+	"os"
 	"path/filepath"
 
-	"github.com/Masterminds/glide/cache"
 	"github.com/Masterminds/glide/cfg"
+	"github.com/Masterminds/glide/dependency"
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/repo"
+	"github.com/sdboyer/gps"
 )
 
 // Update updates repos and the lock file from the main glide yaml.
-func Update(installer *repo.Installer, skipRecursive, stripVendor bool) {
-	cache.SystemLock()
-
+func Update(installer *repo.Installer, sv bool, projs []string) {
 	base := "."
 	EnsureGopath()
 	EnsureVendorDir()
 	conf := EnsureConfig()
 
-	// Try to check out the initial dependencies.
-	if err := installer.Checkout(conf); err != nil {
-		msg.Die("Failed to do initial checkout of config: %s", err)
-	}
-
-	// Set the versions for the initial dependencies so that resolved dependencies
-	// are rooted in the correct version of the base.
-	if err := repo.SetReference(conf, installer.ResolveTest); err != nil {
-		msg.Die("Failed to set initial config references: %s", err)
-	}
-
-	// Prior to resolving dependencies we need to start working with a clone
-	// of the conf because we'll be making real changes to it.
-	confcopy := conf.Clone()
-
-	if !skipRecursive {
-		// Get all repos and update them.
-		err := installer.Update(confcopy)
-		if err != nil {
-			msg.Die("Could not update packages: %s", err)
-		}
-
-		// Set references. There may be no remaining references to set since the
-		// installer set them as it went to make sure it parsed the right imports
-		// from the right version of the package.
-		msg.Info("Setting references for remaining imports")
-		if err := repo.SetReference(confcopy, installer.ResolveTest); err != nil {
-			msg.Err("Failed to set references: %s (Skip to cleanup)", err)
-		}
-	}
-
-	err := installer.Export(confcopy)
-	if err != nil {
-		msg.Die("Unable to export dependencies to vendor directory: %s", err)
-	}
-
-	// Write glide.yaml (Why? Godeps/GPM/GB?)
-	// I think we don't need to write a new Glide file because update should not
-	// change anything important. It will just generate information about
-	// transative dependencies, all of which belongs exclusively in the lock
-	// file, not the glide.yaml file.
 	// TODO(mattfarina): Detect when a new dependency has been added or removed
 	// from the project. A removed dependency should warn and an added dependency
 	// should be added to the glide.yaml file. See issue #193.
 
-	if !skipRecursive {
-		// Write lock
-		hash, err := conf.Hash()
-		if err != nil {
-			msg.Die("Failed to generate config hash. Unable to generate lock file.")
-		}
-		lock, err := cfg.NewLockfile(confcopy.Imports, confcopy.DevImports, hash)
-		if err != nil {
-			msg.Die("Failed to generate lock file: %s", err)
-		}
-		wl := true
-		if gpath.HasLock(base) {
-			yml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))
-			if err == nil {
-				l2, err := cfg.LockfileFromYaml(yml)
-				if err == nil {
-					f1, err := l2.Fingerprint()
-					f2, err2 := lock.Fingerprint()
-					if err == nil && err2 == nil && f1 == f2 {
-						wl = false
-					}
-				}
-			}
-		}
-		if wl {
-			if err := lock.WriteFile(filepath.Join(base, gpath.LockFile)); err != nil {
-				msg.Err("Could not write lock file to %s: %s", base, err)
-				return
-			}
-		} else {
-			msg.Info("Versions did not change. Skipping glide.lock update.")
-		}
-
-		msg.Info("Project relies on %d dependencies.", len(confcopy.Imports))
-	} else {
-		msg.Warn("Skipping lockfile generation because full dependency tree is not being calculated")
+	// TODO might need a better way for discovering the root
+	vend, err := gpath.Vendor()
+	if err != nil {
+		msg.Die("Could not find the vendor dir: %s", err)
 	}
 
-	if stripVendor {
-		msg.Info("Removing nested vendor and Godeps/_workspace directories...")
-		err := gpath.StripVendor()
-		if err != nil {
-			msg.Err("Unable to strip vendor directories: %s", err)
+	rd := filepath.Dir(vend)
+	rt, err := gps.ListPackages(rd, conf.Name)
+	if err != nil {
+		msg.Die("Error while scanning project: %s", err)
+	}
+
+	params := gps.SolveParameters{
+		RootDir:         rd,
+		RootPackageTree: rt,
+		Manifest:        conf,
+		Trace:           true,
+		TraceLogger:     log.New(os.Stdout, "", 0),
+	}
+
+	if len(projs) == 0 {
+		params.ChangeAll = true
+	} else {
+		params.ChangeAll = false
+		for _, p := range projs {
+			if !conf.HasDependency(p) {
+				msg.Die("Cannot update %s, as it is not listed as dependency in glide.yaml.", p)
+			}
+			params.ToChange = append(params.ToChange, gps.ProjectRoot(p))
 		}
 	}
+
+	if gpath.HasLock(base) {
+		params.Lock, _, err = loadLockfile(base, conf)
+		if err != nil {
+			msg.Err("Could not load lockfile, aborting: %s", err)
+			return
+		}
+	}
+
+	// Create the SourceManager for this run
+	sm, err := gps.NewSourceManager(dependency.Analyzer{}, filepath.Join(installer.Home, "cache"))
+	if err != nil {
+		msg.Err(err.Error())
+		return
+	}
+	defer sm.Release()
+
+	// Prepare a solver. This validates our params.
+	s, err := gps.Prepare(params, sm)
+	if err != nil {
+		msg.Err("Could not set up solver: %s", err)
+		return
+	}
+
+	r, err := s.Solve()
+	if err != nil {
+		// TODO better error handling
+		msg.Err(err.Error())
+		return
+	}
+
+	gw := safeGroupWriter{
+		lock:        params.Lock.(*cfg.Lockfile),
+		resultLock:  r,
+		sm:          sm,
+		vendor:      vend,
+		stripVendor: sv,
+	}
+
+	err = gw.writeAllSafe()
+	if err != nil {
+		msg.Err(err.Error())
+		return
+	}
 }
diff --git a/cfg/config.go b/cfg/config.go
index ae01039..82579e0 100644
--- a/cfg/config.go
+++ b/cfg/config.go
@@ -2,15 +2,16 @@
 
 import (
 	"crypto/sha256"
+	"encoding/hex"
 	"fmt"
 	"io/ioutil"
-	"reflect"
 	"sort"
+	"strconv"
 	"strings"
 
 	"github.com/Masterminds/glide/mirrors"
-	"github.com/Masterminds/glide/util"
 	"github.com/Masterminds/vcs"
+	"github.com/sdboyer/gps"
 	"gopkg.in/yaml.v2"
 )
 
@@ -43,17 +44,18 @@
 	// those to skip.
 	Ignore []string `yaml:"ignore,omitempty"`
 
-	// Exclude contains a list of directories in the local application to
-	// exclude from scanning for dependencies.
-	Exclude []string `yaml:"excludeDirs,omitempty"`
-
-	// Imports contains a list of all non-development imports for a project. For
+	// Imports contains a list of all dependency constraints for a project. For
 	// more detail on how these are captured see the Dependency type.
-	Imports Dependencies `yaml:"import"`
+	// TODO rename
+	// TODO mapify
+	Imports Dependencies `yaml:"dependencies"`
 
-	// DevImports contains the test or other development imports for a project.
-	// See the Dependency type for more details on how this is recorded.
-	DevImports Dependencies `yaml:"testImport,omitempty"`
+	// DevImports contains the test or other development dependency constraints
+	// for a project. See the Dependency type for more details on how this is
+	// recorded.
+	// TODO rename
+	// TODO mapify
+	DevImports Dependencies `yaml:"testDependencies"`
 }
 
 // A transitive representation of a dependency for importing and exporting to yaml.
@@ -64,16 +66,27 @@
 	License     string       `yaml:"license,omitempty"`
 	Owners      Owners       `yaml:"owners,omitempty"`
 	Ignore      []string     `yaml:"ignore,omitempty"`
-	Exclude     []string     `yaml:"excludeDirs,omitempty"`
-	Imports     Dependencies `yaml:"import"`
-	DevImports  Dependencies `yaml:"testImport,omitempty"`
+	Imports     Dependencies `yaml:"dependencies,omitempty"`
+	DevImports  Dependencies `yaml:"testDependencies,omitempty"`
+	// these fields guarantee that this struct fails to unmarshal legacy yamls
+	Compat  int `yaml:"import,omitempty"`
+	Compat2 int `yaml:"testImport,omitempty"`
 }
 
 // ConfigFromYaml returns an instance of Config from YAML
-func ConfigFromYaml(yml []byte) (*Config, error) {
-	cfg := &Config{}
-	err := yaml.Unmarshal([]byte(yml), &cfg)
-	return cfg, err
+func ConfigFromYaml(yml []byte) (cfg *Config, legacy bool, err error) {
+	cfg = &Config{}
+	err = yaml.Unmarshal(yml, cfg)
+	if err != nil {
+		lcfg := &lConfig1{}
+		err = yaml.Unmarshal(yml, &lcfg)
+		if err == nil {
+			legacy = true
+			cfg, err = lcfg.Convert()
+		}
+	}
+
+	return
 }
 
 // Marshal converts a Config instance to YAML
@@ -97,7 +110,6 @@
 	c.License = newConfig.License
 	c.Owners = newConfig.Owners
 	c.Ignore = newConfig.Ignore
-	c.Exclude = newConfig.Exclude
 	c.Imports = newConfig.Imports
 	c.DevImports = newConfig.DevImports
 
@@ -116,7 +128,6 @@
 		License:     c.License,
 		Owners:      c.Owners,
 		Ignore:      c.Ignore,
-		Exclude:     c.Exclude,
 	}
 	i, err := c.Imports.Clone().DeDupe()
 	if err != nil {
@@ -149,6 +160,45 @@
 	return false
 }
 
+// DependencyConstraints lists all the non-test dependency constraints
+// described in a glide manifest in a way gps will understand.
+func (c *Config) DependencyConstraints() []gps.ProjectConstraint {
+	return gpsifyDeps(c.Imports)
+}
+
+// TestDependencyConstraints lists all the test dependency constraints described
+// in a glide manifest in a way gps will understand.
+func (c *Config) TestDependencyConstraints() []gps.ProjectConstraint {
+	return gpsifyDeps(c.DevImports)
+}
+
+func gpsifyDeps(deps Dependencies) []gps.ProjectConstraint {
+	cp := make([]gps.ProjectConstraint, len(deps))
+	for k, d := range deps {
+		cp[k] = gps.ProjectConstraint{
+			Ident: gps.ProjectIdentifier{
+				ProjectRoot: gps.ProjectRoot(d.Name),
+				NetworkName: d.Repository,
+			},
+			Constraint: d.GetConstraint(),
+		}
+	}
+
+	return cp
+}
+
+func (c *Config) IgnorePackages() map[string]bool {
+	m := make(map[string]bool)
+	for _, ig := range c.Ignore {
+		m[ig] = true
+	}
+	return m
+}
+
+func (c *Config) Overrides() gps.ProjectConstraints {
+	return nil
+}
+
 // HasIgnore returns true if the given name is listed on the ignore list.
 func (c *Config) HasIgnore(name string) bool {
 	for _, v := range c.Ignore {
@@ -163,18 +213,6 @@
 	return false
 }
 
-// HasExclude returns true if the given name is listed on the exclude list.
-func (c *Config) HasExclude(ex string) bool {
-	ep := normalizeSlash(ex)
-	for _, v := range c.Exclude {
-		if vp := normalizeSlash(v); vp == ep {
-			return true
-		}
-	}
-
-	return false
-}
-
 // Clone performs a deep clone of the Config instance
 func (c *Config) Clone() *Config {
 	n := &Config{}
@@ -184,7 +222,6 @@
 	n.License = c.License
 	n.Owners = c.Owners.Clone()
 	n.Ignore = c.Ignore
-	n.Exclude = c.Exclude
 	n.Imports = c.Imports.Clone()
 	n.DevImports = c.DevImports.Clone()
 	return n
@@ -351,16 +388,13 @@
 			// In here we've encountered a dependency for the second time.
 			// Make sure the details are the same or return an error.
 			v := imports[val]
-			if dep.Reference != v.Reference {
-				return d, fmt.Errorf("Import %s repeated with different versions '%s' and '%s'", dep.Name, dep.Reference, v.Reference)
+			// Have to do string-based comparison
+			if dep.ConstraintsEq(*v) {
+				return d, fmt.Errorf("Import %s repeated with different versions '%s' and '%s'", dep.Name, dep.GetConstraint(), v.GetConstraint())
 			}
-			if dep.Repository != v.Repository || dep.VcsType != v.VcsType {
+			if dep.Repository != v.Repository {
 				return d, fmt.Errorf("Import %s repeated with different Repository details", dep.Name)
 			}
-			if !reflect.DeepEqual(dep.Os, v.Os) || !reflect.DeepEqual(dep.Arch, v.Arch) {
-				return d, fmt.Errorf("Import %s repeated with different OS or Architecture filtering", dep.Name)
-			}
-			imports[checked[dep.Name]].Subpackages = stringArrayDeDupe(v.Subpackages, dep.Subpackages...)
 		}
 	}
 
@@ -369,92 +403,155 @@
 
 // Dependency describes a package that the present package depends upon.
 type Dependency struct {
-	Name        string   `yaml:"package"`
-	Reference   string   `yaml:"version,omitempty"`
-	Pin         string   `yaml:"-"`
-	Repository  string   `yaml:"repo,omitempty"`
-	VcsType     string   `yaml:"vcs,omitempty"`
-	Subpackages []string `yaml:"subpackages,omitempty"`
-	Arch        []string `yaml:"arch,omitempty"`
-	Os          []string `yaml:"os,omitempty"`
+	Name       string
+	VcsType    string // TODO remove
+	Repository string
+	Branch     string
+	Version    string
 }
 
-// A transitive representation of a dependency for importing and exploting to yaml.
+// A transitive representation of a dependency for yaml import/export.
 type dep struct {
-	Name        string   `yaml:"package"`
-	Reference   string   `yaml:"version,omitempty"`
-	Ref         string   `yaml:"ref,omitempty"`
-	Repository  string   `yaml:"repo,omitempty"`
-	VcsType     string   `yaml:"vcs,omitempty"`
-	Subpackages []string `yaml:"subpackages,omitempty"`
-	Arch        []string `yaml:"arch,omitempty"`
-	Os          []string `yaml:"os,omitempty"`
+	Name       string `yaml:"package"`
+	Version    string `yaml:"version,omitempty"`
+	Branch     string `yaml:"branch,omitempty"`
+	Repository string `yaml:"repo,omitempty"`
 }
 
 // DependencyFromLock converts a Lock to a Dependency
 func DependencyFromLock(lock *Lock) *Dependency {
-	return &Dependency{
-		Name:        lock.Name,
-		Reference:   lock.Version,
-		Repository:  lock.Repository,
-		VcsType:     lock.VcsType,
-		Subpackages: lock.Subpackages,
-		Arch:        lock.Arch,
-		Os:          lock.Os,
+	d := &Dependency{
+		Name:       lock.Name,
+		Repository: lock.Repository,
 	}
+
+	// Because it's not allowed to have both, if we see both, prefer version
+	// over branch
+	if lock.Version != "" {
+		d.Version = lock.Version
+	} else if lock.Branch != "" {
+		d.Branch = lock.Branch
+	} else {
+		d.Version = lock.Revision
+	}
+
+	return d
+}
+
+// GetConstraint constructs an appropriate gps.Constraint from the Dependency's
+// string input data.
+func (d Dependency) GetConstraint() gps.Constraint {
+	// If neither or both Version and Branch are set, accept anything
+	if d.IsUnconstrained() {
+		return gps.Any()
+	} else if d.Version != "" {
+		return DeduceConstraint(d.Version)
+	} else {
+		// only case left is a non-empty branch
+		return gps.NewBranch(d.Branch)
+	}
+}
+
+// IsUnconstrained indicates if this dependency has no constraint information,
+// version or branch.
+func (d Dependency) IsUnconstrained() bool {
+	return (d.Version != "" && d.Branch != "") || (d.Version == "" && d.Branch == "")
+}
+
+// ConstraintsEq checks if the constraints on two Dependency are exactly equal.
+func (d Dependency) ConstraintsEq(d2 Dependency) bool {
+	// Having both branch and version set is always an error, so if either have
+	// it, then return false
+	if (d.Version != "" && d.Branch != "") || (d2.Version != "" && d2.Branch != "") {
+		return false
+	}
+	// Neither being set, though, is OK
+	if (d.Version == "" && d.Branch == "") || (d2.Version == "" && d2.Branch == "") {
+		return true
+	}
+
+	// Now, xors
+	if d.Version != "" && d.Version == d2.Version {
+		return true
+	}
+	if d.Branch == d2.Branch {
+		return true
+	}
+	return false
 }
 
 // UnmarshalYAML is a hook for gopkg.in/yaml.v2 in the unmarshaling process
 func (d *Dependency) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	newDep := &dep{}
+	newDep := dep{}
 	err := unmarshal(&newDep)
 	if err != nil {
 		return err
 	}
+
+	if newDep.Version != "" && newDep.Branch != "" {
+		return fmt.Errorf("Cannot set both a both a branch and a version constraint for %q", d.Name)
+	}
+
 	d.Name = newDep.Name
-	d.Reference = newDep.Reference
 	d.Repository = newDep.Repository
-	d.VcsType = newDep.VcsType
-	d.Subpackages = newDep.Subpackages
-	d.Arch = newDep.Arch
-	d.Os = newDep.Os
-
-	if d.Reference == "" && newDep.Ref != "" {
-		d.Reference = newDep.Ref
-	}
-
-	// Make sure only legitimate VCS are listed.
-	d.VcsType = filterVcsType(d.VcsType)
-
-	// Get the root name for the package
-	tn, subpkg := util.NormalizeName(d.Name)
-	d.Name = tn
-	if subpkg != "" {
-		d.Subpackages = append(d.Subpackages, subpkg)
-	}
-
-	// Older versions of Glide had a / prefix on subpackages in some cases.
-	// Here that's cleaned up. Someday we should be able to remove this.
-	for k, v := range d.Subpackages {
-		d.Subpackages[k] = strings.TrimPrefix(v, "/")
-	}
+	d.Version = newDep.Version
+	d.Branch = newDep.Branch
 
 	return nil
 }
 
+// DeduceConstraint tries to puzzle out what kind of version is given in a string -
+// semver, a revision, or as a fallback, a plain tag
+func DeduceConstraint(s string) gps.Constraint {
+	// always semver if we can
+	c, err := gps.NewSemverConstraint(s)
+	if err == nil {
+		return c
+	}
+
+	slen := len(s)
+	if slen == 40 {
+		if _, err = hex.DecodeString(s); err == nil {
+			// Whether or not it's intended to be a SHA1 digest, this is a
+			// valid byte sequence for that, so go with Revision. This
+			// covers git and hg
+			return gps.Revision(s)
+		}
+	}
+	// Next, try for bzr, which has a three-component GUID separated by
+	// dashes. There should be two, but the email part could contain
+	// internal dashes
+	if strings.Count(s, "-") >= 2 {
+		// Work from the back to avoid potential confusion from the email
+		i3 := strings.LastIndex(s, "-")
+		// Skip if - is last char, otherwise this would panic on bounds err
+		if slen == i3+1 {
+			return gps.NewVersion(s)
+		}
+
+		if _, err = hex.DecodeString(s[i3+1:]); err == nil {
+			i2 := strings.LastIndex(s[:i3], "-")
+			if _, err = strconv.ParseUint(s[i2+1:i3], 10, 64); err == nil {
+				// Getting this far means it'd pretty much be nuts if it's not a
+				// bzr rev, so don't bother parsing the email.
+				return gps.Revision(s)
+			}
+		}
+	}
+
+	// If not a plain SHA1 or bzr custom GUID, assume a plain version.
+	//
+	// svn, you ask? lol, madame. lol.
+	return gps.NewVersion(s)
+}
+
 // MarshalYAML is a hook for gopkg.in/yaml.v2 in the marshaling process
 func (d *Dependency) MarshalYAML() (interface{}, error) {
-
-	// Make sure we only write the correct vcs type to file
-	t := filterVcsType(d.VcsType)
 	newDep := &dep{
-		Name:        d.Name,
-		Reference:   d.Reference,
-		Repository:  d.Repository,
-		VcsType:     t,
-		Subpackages: d.Subpackages,
-		Arch:        d.Arch,
-		Os:          d.Os,
+		Name:       d.Name,
+		Repository: d.Repository,
+		Version:    d.Version,
+		Branch:     d.Branch,
 	}
 
 	return newDep, nil
@@ -499,8 +596,8 @@
 
 // GetRepo retrieves a Masterminds/vcs repo object configured for the root
 // of the package being retrieved.
+// TODO remove
 func (d *Dependency) GetRepo(dest string) (vcs.Repo, error) {
-
 	// The remote location is either the configured repo or the package
 	// name as an https url.
 	remote := d.Remote()
@@ -529,28 +626,9 @@
 
 // Clone creates a clone of a Dependency
 func (d *Dependency) Clone() *Dependency {
-	return &Dependency{
-		Name:        d.Name,
-		Reference:   d.Reference,
-		Pin:         d.Pin,
-		Repository:  d.Repository,
-		VcsType:     d.VcsType,
-		Subpackages: d.Subpackages,
-		Arch:        d.Arch,
-		Os:          d.Os,
-	}
-}
-
-// HasSubpackage returns if the subpackage is present on the dependency
-func (d *Dependency) HasSubpackage(sub string) bool {
-
-	for _, v := range d.Subpackages {
-		if sub == v {
-			return true
-		}
-	}
-
-	return false
+	var d2 Dependency
+	d2 = *d
+	return &d2
 }
 
 // Owners is a list of owners for a project.
diff --git a/cfg/config_test.go b/cfg/config_test.go
index 6313ff0..72400d2 100644
--- a/cfg/config_test.go
+++ b/cfg/config_test.go
@@ -3,10 +3,12 @@
 import (
 	"testing"
 
+	"github.com/sdboyer/gps"
+
 	"gopkg.in/yaml.v2"
 )
 
-var yml = `
+var lyml = `
 package: fake/testing
 description: foo bar baz
 homepage: https://example.com
@@ -34,6 +36,7 @@
       - i386
       - arm
   - package: github.com/Masterminds/structable
+    version: v1.0.0
   - package: github.com/Masterminds/cookoo/color
   - package: github.com/Masterminds/cookoo/convert
 
@@ -41,6 +44,33 @@
   - package: github.com/kylelemons/go-gypsy
 `
 
+var yml = `
+package: fake/testing
+description: foo bar baz
+homepage: https://example.com
+license: MIT
+owners:
+- name: foo
+  email: bar@example.com
+  homepage: https://example.com
+dependencies:
+  - package: github.com/kylelemons/go-gypsy
+    version: v1.0.0
+  - package: github.com/Masterminds/convert
+    repo: git@github.com:Masterminds/convert.git
+    version: a9949121a2e2192ca92fa6dddfeaaa4a4412d955
+  - package: github.com/Masterminds/structable
+    branch: master
+  - package: github.com/Masterminds/cookoo
+    repo: git://github.com/Masterminds/cookoo
+  - package: github.com/sdboyer/gps
+    version: ^v1.0.0
+
+testDependencies:
+  - package: github.com/Sirupsen/logrus
+    version: ~v1.0.0
+`
+
 func TestManualConfigFromYaml(t *testing.T) {
 	cfg := &Config{}
 	err := yaml.Unmarshal([]byte(yml), &cfg)
@@ -48,6 +78,103 @@
 		t.Errorf("Unable to Unmarshal config yaml")
 	}
 
+	found := make(map[string]bool)
+	for _, i := range cfg.Imports {
+		found[i.Name] = true
+
+		switch i.Name {
+		case "github.com/kylelemons/go-gypsy":
+			ref := "v1.0.0"
+			if i.Version != ref {
+				t.Errorf("(%s) Expected %q for constraint, got %q", i.Name, ref, i.Version)
+			}
+
+		case "github.com/Masterminds/convert":
+			ref := "a9949121a2e2192ca92fa6dddfeaaa4a4412d955"
+			if i.Version != ref {
+				t.Errorf("(%s) Expected %q for constraint, got %q", i.Name, ref, i.Version)
+			}
+
+			repo := "git@github.com:Masterminds/convert.git"
+			if i.Repository != repo {
+				t.Errorf("(%s) Expected %q for repository, got %q", i.Name, repo, i.Repository)
+			}
+
+		case "github.com/Masterminds/structable":
+			ref := "master"
+			if i.Branch != ref {
+				t.Errorf("(%s) Expected %q for constraint, got %q", i.Name, ref, i.Branch)
+			}
+
+		case "github.com/Masterminds/cookoo":
+			repo := "git://github.com/Masterminds/cookoo"
+			if i.Repository != repo {
+				t.Errorf("(%s) Expected %q for repository, got %q", i.Name, repo, i.Repository)
+			}
+
+		case "github.com/sdboyer/gps":
+			sv := "^v1.0.0"
+			if i.Version != sv {
+				t.Errorf("(%s) Expected %q for constraint, got %q", i.Name, sv, i.Version)
+			}
+		}
+	}
+
+	names := []string{
+		"github.com/Masterminds/convert",
+		"github.com/Masterminds/cookoo",
+		"github.com/Masterminds/structable",
+		"github.com/kylelemons/go-gypsy",
+		"github.com/sdboyer/gps",
+	}
+
+	for _, n := range names {
+		if !found[n] {
+			t.Errorf("Could not find config entry for %s", n)
+
+		}
+	}
+
+	if len(cfg.DevImports) != 1 {
+		t.Errorf("Expected 1 entry in DevImports, got %v", len(cfg.DevImports))
+	} else {
+		ti := cfg.DevImports[0]
+		n := "github.com/Sirupsen/logrus"
+		if ti.Name != n {
+			t.Errorf("Expected test dependency to be %s, got %s", n, ti.Name)
+		}
+
+		sv := "~v1.0.0"
+		if ti.Version != sv {
+			t.Errorf("(%s) Expected %q for constraint, got %q", ti.Name, sv, ti.Version)
+		}
+	}
+
+	if cfg.Name != "fake/testing" {
+		t.Errorf("Inaccurate name found %s", cfg.Name)
+	}
+
+	if cfg.Description != "foo bar baz" {
+		t.Errorf("Inaccurate description found %s", cfg.Description)
+	}
+
+	if cfg.Home != "https://example.com" {
+		t.Errorf("Inaccurate homepage found %s", cfg.Home)
+	}
+
+	if cfg.License != "MIT" {
+		t.Errorf("Inaccurate license found %s", cfg.License)
+	}
+
+}
+
+func TestLegacyManualConfigFromYaml(t *testing.T) {
+	cfg := &lConfig1{}
+	err := yaml.Unmarshal([]byte(lyml), &cfg)
+	if err != nil {
+		t.Errorf("Unable to Unmarshal config yaml")
+	}
+
 	if cfg.Name != "fake/testing" {
 		t.Errorf("Inaccurate name found %s", cfg.Name)
 	}
@@ -111,8 +238,60 @@
 	}
 }
 
+func TestLegacyConfigAutoconvert(t *testing.T) {
+	c, leg, err := ConfigFromYaml([]byte(lyml))
+	if err != nil {
+		t.Errorf("ConfigFromYaml failed to detect and autoconvert legacy yaml file with err %s", err)
+	}
+
+	if !leg {
+		t.Errorf("ConfigFromYaml failed to report autoconversion of legacy yaml file")
+	}
+
+	if c.Name != "fake/testing" {
+		t.Error("ConfigFromYaml failed to properly autoconvert legacy yaml file")
+	}
+
+	// Two should survive the conversion
+	if len(c.Imports) != 2 {
+		t.Error("Expected two dep clauses to survive conversion, but got ", len(c.Imports))
+	}
+
+	found := false
+	found2 := false
+	for _, i := range c.Imports {
+		if i.Name == "github.com/Masterminds/convert" {
+			found = true
+			ref := "a9949121a2e2192ca92fa6dddfeaaa4a4412d955"
+			if i.Version != ref {
+				t.Errorf("(%s) Expected %q for constraint, got %q", i.Name, ref, i.Version)
+			}
+
+			repo := "git@github.com:Masterminds/convert.git"
+			if i.Repository != repo {
+				t.Errorf("(%s) Expected %q for repository, got %q", i.Name, repo, i.Repository)
+			}
+		}
+
+		if i.Name == "github.com/Masterminds/structable" {
+			found2 = true
+			ref := "v1.0.0"
+			if i.Version != ref {
+				t.Errorf("(%s) Expected %q for constraint, got %q", i.Name, ref, i.Version)
+			}
+		}
+	}
+
+	if !found {
+		t.Error("Unable to find github.com/Masterminds/convert")
+	}
+	if !found2 {
+		t.Error("Unable to find github.com/Masterminds/structable")
+	}
+}
+
 func TestConfigFromYaml(t *testing.T) {
-	c, err := ConfigFromYaml([]byte(yml))
+	c, _, err := ConfigFromYaml([]byte(yml))
 	if err != nil {
 		t.Error("ConfigFromYaml failed to parse yaml")
 	}
@@ -123,12 +302,12 @@
 }
 
 func TestHasDependency(t *testing.T) {
-	c, err := ConfigFromYaml([]byte(yml))
+	c, _, err := ConfigFromYaml([]byte(yml))
 	if err != nil {
 		t.Error("ConfigFromYaml failed to parse yaml for HasDependency")
 	}
 
-	if c.HasDependency("github.com/Masterminds/convert") != true {
+	if !c.HasDependency("github.com/Masterminds/convert") {
 		t.Error("HasDependency failing to pickup depenency")
 	}
 
@@ -176,3 +355,47 @@
 		t.Error("Unable to parse owners from yaml")
 	}
 }
+
+func TestDeduceConstraint(t *testing.T) {
+	// First, valid semver
+	c := DeduceConstraint("v1.0.0")
+	if c.(gps.Version).Type() != "semver" {
+		t.Errorf("Got unexpected version type when passing valid semver string: %T %s", c, c)
+	}
+
+	// Now, 20 hex-encoded bytes (which should be assumed to be a SHA1 digest)
+	revin := "a9949121a2e2192ca92fa6dddfeaaa4a4412d955"
+	c = DeduceConstraint(revin)
+	if c != gps.Revision(revin) {
+		t.Errorf("Got unexpected version type/val when passing hex-encoded SHA1 digest: %T %s", c, c)
+	}
+
+	// Now, the weird bzr guid
+	bzrguid := "john@smith.org-20051026185030-93c7cad63ee570df"
+	c = DeduceConstraint(bzrguid)
+	if c != gps.Revision(bzrguid) {
+		t.Errorf("Expected revision with valid bzr guid, got: %T %s", c, c)
+	}
+
+	// Check fails if the bzr rev is malformed or weirdly formed
+	//
+	// chopping off a char should make the hex decode check fail
+	c = DeduceConstraint(bzrguid[:len(bzrguid)-1])
+	if c != gps.NewVersion(bzrguid[:len(bzrguid)-1]) {
+		t.Errorf("Expected plain version when bzr guid has truncated tail hex bits: %T %s", c, c)
+	}
+
+	// Extra dash in email doesn't mess us up
+	bzrguid2 := "john-smith@smith.org-20051026185030-93c7cad63ee570df"
+	c = DeduceConstraint(bzrguid2)
+	if c != gps.Revision(bzrguid2) {
+		t.Errorf("Expected revision when passing bzr guid has extra dash in email, got: %T %s", c, c)
+	}
+
+	// Non-numeric char in middle section bites it
+	bzrguid3 := "john-smith@smith.org-2005102a6185030-93c7cad63ee570df"
+	c = DeduceConstraint(bzrguid3)
+	if c != gps.NewVersion(bzrguid3) {
+		t.Errorf("Expected plain version when bzr guid has invalid second section, got: %T %s", c, c)
+	}
+}
diff --git a/cfg/legacy.go b/cfg/legacy.go
new file mode 100644
index 0000000..b171587
--- /dev/null
+++ b/cfg/legacy.go
@@ -0,0 +1,338 @@
+package cfg
+
+import (
+	"fmt"
+	"path"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/Masterminds/glide/util"
+)
+
+// lConfig1 is a legacy Config file.
+type lConfig1 struct {
+	Name        string         `yaml:"package"`
+	Description string         `json:"description,omitempty"`
+	Home        string         `yaml:"homepage,omitempty"`
+	License     string         `yaml:"license,omitempty"`
+	Owners      Owners         `yaml:"owners,omitempty"`
+	Ignore      []string       `yaml:"ignore,omitempty"`
+	Exclude     []string       `yaml:"excludeDirs,omitempty"`
+	Imports     lDependencies1 `yaml:"import"`
+	DevImports  lDependencies1 `yaml:"testImport,omitempty"`
+}
+
+func (c *lConfig1) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	newConfig := &lcf1{}
+	if err := unmarshal(&newConfig); err != nil {
+		return err
+	}
+	c.Name = newConfig.Name
+	c.Description = newConfig.Description
+	c.Home = newConfig.Home
+	c.License = newConfig.License
+	c.Owners = newConfig.Owners
+	c.Ignore = newConfig.Ignore
+	c.Exclude = newConfig.Exclude
+	c.Imports = newConfig.Imports
+	c.DevImports = newConfig.DevImports
+
+	// Cleanup the Config object now that we have it.
+	err := c.DeDupe()
+
+	return err
+}
+
+func (c *lConfig1) Convert() (*Config, error) {
+	// This is probably already done, but do it just in case
+	err := c.DeDupe()
+	if err != nil {
+		return nil, err
+	}
+
+	// Pull over the easy values
+	c2 := &Config{
+		Name:        c.Name,
+		Description: c.Description,
+		Home:        c.Home,
+		License:     c.License,
+		Owners:      c.Owners,
+		Ignore:      c.Ignore,
+	}
+
+	// _if_ the name is set, path-prepend it to exclude, and add that to ignore.
+	// Otherwise, we just skip excludes. Not that big a deal, since they're a
+	// root-only property anyway; the user can reasonably recreate.
+
+	if c.Name != "" {
+		for _, excl := range c.Exclude {
+			c.Ignore = append(c.Ignore, path.Join(c.Name, excl))
+			// The trailing * is interpreted by gps as an ignore on that and all
+			// child paths (or, soon - https://github.com/sdboyer/gps/issues/88)
+			c.Ignore = append(c.Ignore, path.Join(c.Name, excl, "*"))
+		}
+	}
+
+	// Quitting early on this might seem risky, but a) all possible errs
+	// _should_ have already been surfaced in the earlier DeDupe(), and b) there
+	// are no new errs introduced by the conversion itself, so this doesn't
+	// actually increase the surface area for failures vis-a-vis pre-gps glide.
+	c2.Imports, err = c.Imports.Convert()
+	if err != nil {
+		return nil, err
+	}
+	c2.DevImports, err = c.DevImports.Convert()
+	if err != nil {
+		return nil, err
+	}
+
+	return c2, nil
+}
+
+// DeDupe consolidates duplicate dependencies on a Config instance
+func (c *lConfig1) DeDupe() error {
+	// Remove duplicates in the imports
+	var err error
+	c.Imports, err = c.Imports.DeDupe()
+	if err != nil {
+		return err
+	}
+	c.DevImports, err = c.DevImports.DeDupe()
+	if err != nil {
+		return err
+	}
+
+	// If the name on the config object is part of the imports remove it.
+	found := -1
+	for i, dep := range c.Imports {
+		if dep.Name == c.Name {
+			found = i
+		}
+	}
+	if found >= 0 {
+		c.Imports = append(c.Imports[:found], c.Imports[found+1:]...)
+	}
+
+	found = -1
+	for i, dep := range c.DevImports {
+		if dep.Name == c.Name {
+			found = i
+		}
+	}
+	if found >= 0 {
+		c.DevImports = append(c.DevImports[:found], c.DevImports[found+1:]...)
+	}
+
+	// If something is on the ignore list remove it from the imports.
+	for _, v := range c.Ignore {
+		found = -1
+		for k, d := range c.Imports {
+			if v == d.Name {
+				found = k
+			}
+		}
+		if found >= 0 {
+			c.Imports = append(c.Imports[:found], c.Imports[found+1:]...)
+		}
+
+		found = -1
+		for k, d := range c.DevImports {
+			if v == d.Name {
+				found = k
+			}
+		}
+		if found >= 0 {
+			c.DevImports = append(c.DevImports[:found], c.DevImports[found+1:]...)
+		}
+	}
+
+	return nil
+}
+
+// Legacy representation of a glide.yaml file.
+type lcf1 struct {
+	Name        string         `yaml:"package"`
+	Description string         `yaml:"description,omitempty"`
+	Home        string         `yaml:"homepage,omitempty"`
+	License     string         `yaml:"license,omitempty"`
+	Owners      Owners         `yaml:"owners,omitempty"`
+	Ignore      []string       `yaml:"ignore,omitempty"`
+	Exclude     []string       `yaml:"excludeDirs,omitempty"`
+	Imports     lDependencies1 `yaml:"import"`
+	DevImports  lDependencies1 `yaml:"testImport,omitempty"`
+	// these fields guarantee that this struct fails to unmarshal the new yamls
+	Compat  int `yaml:"dependencies,omitempty"`
+	Compat2 int `yaml:"testDependencies,omitempty"`
+}
+
+type lDependencies1 []*lDependency1
+
+func (ds lDependencies1) Convert() (Dependencies, error) {
+	dds, err := ds.DeDupe()
+	if err != nil {
+		return nil, err
+	}
+
+	var ds2 Dependencies
+	for _, d := range dds {
+		// If we have neither a repo nor a reference, then it's pointless to
+		// include this dep in the list (it will add no information to gps)
+		if d.Repository == "" && d.Reference == "" {
+			continue
+		}
+
+		d2 := &Dependency{
+			Name:       d.Name,
+			Repository: d.Repository,
+			Version:    d.Reference,
+		}
+
+		// TODO(sdboyer) d.Reference doesn't disambiguate between branches and
+		// tags. Check the version list (via source manager) to convert most
+		// sanely?
+		ds2 = append(ds2, d2)
+	}
+
+	return ds2, nil
+}
+
+// DeDupe cleans up duplicates on a list of dependencies.
+func (d lDependencies1) DeDupe() (lDependencies1, error) {
+	checked := map[string]int{}
+	imports := make(lDependencies1, 0, 1)
+	i := 0
+	for _, dep := range d {
+		// The first time we encounter a dependency add it to the list
+		if val, ok := checked[dep.Name]; !ok {
+			checked[dep.Name] = i
+			imports = append(imports, dep)
+			i++
+		} else {
+			// In here we've encountered a dependency for the second time.
+			// Make sure the details are the same or return an error.
+			v := imports[val]
+			if dep.Reference != v.Reference {
+				return d, fmt.Errorf("Import %s repeated with different versions '%s' and '%s'", dep.Name, dep.Reference, v.Reference)
+			}
+			if dep.Repository != v.Repository || dep.VcsType != v.VcsType {
+				return d, fmt.Errorf("Import %s repeated with different Repository details", dep.Name)
+			}
+			if !reflect.DeepEqual(dep.Os, v.Os) || !reflect.DeepEqual(dep.Arch, v.Arch) {
+				return d, fmt.Errorf("Import %s repeated with different OS or Architecture filtering", dep.Name)
+			}
+			imports[checked[dep.Name]].Subpackages = stringArrayDeDupe(v.Subpackages, dep.Subpackages...)
+		}
+	}
+
+	return imports, nil
+}
+
+type lDependency1 struct {
+	Name        string   `yaml:"package"`
+	Reference   string   `yaml:"version,omitempty"`
+	Pin         string   `yaml:"-"`
+	Repository  string   `yaml:"repo,omitempty"`
+	VcsType     string   `yaml:"vcs,omitempty"`
+	Subpackages []string `yaml:"subpackages,omitempty"`
+	Arch        []string `yaml:"arch,omitempty"`
+	Os          []string `yaml:"os,omitempty"`
+}
+
+// Legacy unmarshaler for dependency component of yaml files
+func (d *lDependency1) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	newDep := &ldep1{}
+	err := unmarshal(newDep)
+	if err != nil {
+		return err
+	}
+	d.Name = newDep.Name
+	d.Reference = newDep.Reference
+	d.Repository = newDep.Repository
+	d.VcsType = newDep.VcsType
+	d.Subpackages = newDep.Subpackages
+	d.Arch = newDep.Arch
+	d.Os = newDep.Os
+
+	if d.Reference == "" && newDep.Ref != "" {
+		d.Reference = newDep.Ref
+	}
+
+	// Make sure only legitimate VCS are listed.
+	d.VcsType = filterVcsType(d.VcsType)
+
+	// Get the root name for the package
+	tn, subpkg := util.NormalizeName(d.Name)
+	d.Name = tn
+	if subpkg != "" {
+		d.Subpackages = append(d.Subpackages, subpkg)
+	}
+
+	// Older versions of Glide had a / prefix on subpackages in some cases.
+	// Here that's cleaned up. Someday we should be able to remove this.
+	for k, v := range d.Subpackages {
+		d.Subpackages[k] = strings.TrimPrefix(v, "/")
+	}
+
+	return nil
+}
+
+// Legacy representation of a dep constraint
+type ldep1 struct {
+	Name        string   `yaml:"package"`
+	Reference   string   `yaml:"version,omitempty"`
+	Ref         string   `yaml:"ref,omitempty"`
+	Repository  string   `yaml:"repo,omitempty"`
+	VcsType     string   `yaml:"vcs,omitempty"`
+	Subpackages []string `yaml:"subpackages,omitempty"`
+	Arch        []string `yaml:"arch,omitempty"`
+	Os          []string `yaml:"os,omitempty"`
+}
+
+type lLockfile1 struct {
+	Hash       string    `yaml:"hash"`
+	Updated    time.Time `yaml:"updated"`
+	Imports    lLocks1   `yaml:"imports"`
+	DevImports lLocks1   `yaml:"testImports"` // TODO remove and fold in as prop
+	Compat     int       `yaml:"import,omitempty"`
+	Compat2    int       `yaml:"testImport,omitempty"`
+}
+
+func (l *lLockfile1) Convert() *Lockfile {
+	return &Lockfile{
+		Hash:       l.Hash,
+		Updated:    l.Updated,
+		Imports:    l.Imports.Convert(),
+		DevImports: l.DevImports.Convert(),
+	}
+}
+
+type lLocks1 []*lLock1
+
+func (ll lLocks1) Convert() Locks {
+	var ll2 Locks
+	for _, l := range ll {
+		// If they have no rev, just drop them
+		if l.Version == "" {
+			continue
+		}
+
+		ll2 = append(ll2, &Lock{
+			Name:       l.Name,
+			Repository: l.Repository,
+			Revision:   l.Version,
+		})
+	}
+
+	return ll2
+}
+
+type lLock1 struct {
+	Name        string   `yaml:"name"`
+	Version     string   `yaml:"version"`
+	Repository  string   `yaml:"repo,omitempty"`
+	VcsType     string   `yaml:"vcs,omitempty"`
+	Subpackages []string `yaml:"subpackages,omitempty"`
+	Arch        []string `yaml:"arch,omitempty"`
+	Os          []string `yaml:"os,omitempty"`
+}
diff --git a/cfg/lock.go b/cfg/lock.go
index c03ac41..22362ad 100644
--- a/cfg/lock.go
+++ b/cfg/lock.go
@@ -2,12 +2,15 @@
 
 import (
 	"crypto/sha256"
+	"encoding/hex"
 	"fmt"
 	"io/ioutil"
 	"sort"
 	"strings"
 	"time"
 
+	"github.com/sdboyer/gps"
+
 	"gopkg.in/yaml.v2"
 )
 
@@ -16,18 +19,79 @@
 	Hash       string    `yaml:"hash"`
 	Updated    time.Time `yaml:"updated"`
 	Imports    Locks     `yaml:"imports"`
-	DevImports Locks     `yaml:"testImports"`
+	DevImports Locks     `yaml:"testImports"` // TODO remove and fold in as prop
+}
+
+// LockfileFromSolverLock transforms a gps.Lock into a glide *Lockfile.
+func LockfileFromSolverLock(r gps.Lock) (*Lockfile, error) {
+	if r == nil {
+		return nil, fmt.Errorf("no gps lock data provided to transform")
+	}
+
+	// Create and write out a new lock file from the result
+	lf := &Lockfile{
+		Hash:    hex.EncodeToString(r.InputHash()),
+		Updated: time.Now(),
+	}
+
+	for _, p := range r.Projects() {
+		pi := p.Ident()
+		l := &Lock{
+			Name: string(pi.ProjectRoot),
+		}
+
+		if l.Name != pi.NetworkName && pi.NetworkName != "" {
+			l.Repository = pi.NetworkName
+		}
+
+		v := p.Version()
+		// There's (currently) no way gps can emit a non-paired version in a
+		// solution, so this unchecked type assertion should be safe.
+		//
+		// TODO might still be better to check and return out with an err if
+		// not, though
+		switch tv := v.(type) {
+		case gps.Revision:
+			l.Revision = tv.String()
+		case gps.PairedVersion:
+			l.Revision = v.(gps.PairedVersion).Underlying().String()
+			switch v.Type() {
+			case "branch":
+				l.Branch = v.String()
+			case "semver", "version":
+				l.Version = v.String()
+			}
+		case gps.UnpairedVersion:
+			// this should not be possible - error if we hit it
+			return nil, fmt.Errorf("should not be possible - gps returned an unpaired version for %s", pi)
+		}
+
+		lf.Imports = append(lf.Imports, l)
+	}
+
+	return lf, nil
 }
 
 // LockfileFromYaml returns an instance of Lockfile from YAML
-func LockfileFromYaml(yml []byte) (*Lockfile, error) {
+func LockfileFromYaml(yml []byte) (*Lockfile, bool, error) {
 	lock := &Lockfile{}
-	err := yaml.Unmarshal([]byte(yml), &lock)
-	return lock, err
+	err := yaml.Unmarshal([]byte(yml), lock)
+	if err == nil {
+		return lock, false, nil
+	}
+
+	llock := &lLockfile1{}
+	err2 := yaml.Unmarshal([]byte(yml), llock)
+	if err2 != nil {
+		return nil, false, err2
+	}
+	return llock.Convert(), true, nil
 }
 
-// Marshal converts a Config instance to YAML
+// Marshal converts a Lockfile instance to YAML
 func (lf *Lockfile) Marshal() ([]byte, error) {
+	sort.Sort(lf.Imports)
+	sort.Sort(lf.DevImports)
 	yml, err := yaml.Marshal(&lf)
 	if err != nil {
 		return []byte{}, err
@@ -38,10 +102,6 @@
 // MarshalYAML is a hook for gopkg.in/yaml.v2.
 // It sorts import subpackages lexicographically for reproducibility.
 func (lf *Lockfile) MarshalYAML() (interface{}, error) {
-	for _, imp := range lf.Imports {
-		sort.Strings(imp.Subpackages)
-	}
-
 	// Ensure elements on testImport don't already exist on import.
 	var newDI Locks
 	var found bool
@@ -62,9 +122,6 @@
 	}
 	lf.DevImports = newDI
 
-	for _, imp := range lf.DevImports {
-		sort.Strings(imp.Subpackages)
-	}
 	return lf, nil
 }
 
@@ -80,6 +137,43 @@
 	return ioutil.WriteFile(lockpath, o, 0666)
 }
 
+// InputHash returns the hash of the input arguments that resulted in this lock
+// file.
+func (lf *Lockfile) InputHash() []byte {
+	b, err := hex.DecodeString(lf.Hash)
+	if err != nil {
+		return nil
+	}
+	return b
+}
+
+// Projects returns the list of projects enumerated in the lock file.
+func (lf *Lockfile) Projects() []gps.LockedProject {
+	all := append(append(Locks{}, lf.Imports...), lf.DevImports...)
+	lp := make([]gps.LockedProject, len(all))
+
+	for k, l := range all {
+		r := gps.Revision(l.Revision)
+
+		var v gps.Version
+		if l.Version != "" {
+			v = gps.NewVersion(l.Version).Is(r)
+		} else if l.Branch != "" {
+			v = gps.NewBranch(l.Branch).Is(r)
+		} else {
+			v = r
+		}
+
+		id := gps.ProjectIdentifier{
+			ProjectRoot: gps.ProjectRoot(l.Name),
+			NetworkName: l.Repository,
+		}
+		lp[k] = gps.NewLockedProject(id, v, nil)
+	}
+
+	return lp
+}
+
 // Clone returns a clone of Lockfile
 func (lf *Lockfile) Clone() *Lockfile {
 	n := &Lockfile{}
@@ -93,6 +187,7 @@
 
 // Fingerprint returns a hash of the contents minus the date. This allows for
 // two lockfiles to be compared irrespective of their updated times.
+// TODO remove, or seriously re-adapt
 func (lf *Lockfile) Fingerprint() ([32]byte, error) {
 	c := lf.Clone()
 	c.Updated = time.Time{} // Set the time to be the nil equivalent
@@ -112,7 +207,7 @@
 	if err != nil {
 		return nil, err
 	}
-	lock, err := LockfileFromYaml(yml)
+	lock, _, err := LockfileFromYaml(yml)
 	if err != nil {
 		return nil, err
 	}
@@ -155,42 +250,62 @@
 
 // Lock represents an individual locked dependency.
 type Lock struct {
-	Name        string   `yaml:"name"`
-	Version     string   `yaml:"version"`
-	Repository  string   `yaml:"repo,omitempty"`
-	VcsType     string   `yaml:"vcs,omitempty"`
-	Subpackages []string `yaml:"subpackages,omitempty"`
-	Arch        []string `yaml:"arch,omitempty"`
-	Os          []string `yaml:"os,omitempty"`
+	Name       string `yaml:"name"`
+	Version    string `yaml:"version,omitempty"`
+	Branch     string `yaml:"branch,omitempty"`
+	Revision   string `yaml:"revision"`
+	Repository string `yaml:"repo,omitempty"`
+}
+
+func (l *Lock) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	nl := struct {
+		Name       string `yaml:"name"`
+		Version    string `yaml:"version,omitempty"`
+		Branch     string `yaml:"branch,omitempty"`
+		Revision   string `yaml:"revision"`
+		Repository string `yaml:"repo,omitempty"`
+	}{}
+
+	err := unmarshal(&nl)
+	if err != nil {
+		return err
+	}
+
+	// If Revision field is empty, then we can be certain this is either a
+	// legacy file, or just plain invalid
+	if nl.Revision == "" {
+		return fmt.Errorf("dependency %s is missing a revision; is this a legacy glide.lock file?", nl.Name)
+	}
+
+	l.Name = nl.Name
+	l.Version = nl.Version
+	l.Branch = nl.Branch
+	l.Revision = nl.Revision
+	l.Repository = nl.Repository
+
+	return nil
 }
 
 // Clone creates a clone of a Lock.
 func (l *Lock) Clone() *Lock {
-	return &Lock{
-		Name:        l.Name,
-		Version:     l.Version,
-		Repository:  l.Repository,
-		VcsType:     l.VcsType,
-		Subpackages: l.Subpackages,
-		Arch:        l.Arch,
-		Os:          l.Os,
-	}
+	var l2 Lock
+	l2 = *l
+	return &l2
 }
 
 // LockFromDependency converts a Dependency to a Lock
+// TODO remove
 func LockFromDependency(dep *Dependency) *Lock {
-	return &Lock{
-		Name:        dep.Name,
-		Version:     dep.Pin,
-		Repository:  dep.Repository,
-		VcsType:     dep.VcsType,
-		Subpackages: dep.Subpackages,
-		Arch:        dep.Arch,
-		Os:          dep.Os,
+	l := &Lock{
+		Name:       dep.Name,
+		Repository: dep.Repository,
 	}
+
+	return l
 }
 
 // NewLockfile is used to create an instance of Lockfile.
+// TODO remove
 func NewLockfile(ds, tds Dependencies, hash string) (*Lockfile, error) {
 	lf := &Lockfile{
 		Hash:       hash,
@@ -211,8 +326,8 @@
 		for ii := 0; ii < len(ds); ii++ {
 			if ds[ii].Name == tds[i].Name {
 				found = true
-				if ds[ii].Reference != tds[i].Reference {
-					return &Lockfile{}, fmt.Errorf("Generating lock produced conflicting versions of %s. import (%s), testImport (%s)", tds[i].Name, ds[ii].Reference, tds[i].Reference)
+				if !ds[ii].ConstraintsEq(*tds[i]) {
+					return &Lockfile{}, fmt.Errorf("Generating lock produced conflicting versions of %s. import (%s), testImport (%s)", tds[i].Name, ds[ii].GetConstraint(), tds[i].GetConstraint())
 				}
 				break
 			}
@@ -228,6 +343,7 @@
 }
 
 // LockfileFromMap takes a map of dependencies and generates a lock Lockfile instance.
+// TODO remove
 func LockfileFromMap(ds map[string]*Dependency, hash string) *Lockfile {
 	lf := &Lockfile{
 		Hash:    hash,
diff --git a/cfg/lock_test.go b/cfg/lock_test.go
index b6bb31e..855bd62 100644
--- a/cfg/lock_test.go
+++ b/cfg/lock_test.go
@@ -1,112 +1,30 @@
 package cfg
 
-import (
-	"sort"
-	"strings"
-	"testing"
-)
+import "testing"
 
-func TestSortLocks(t *testing.T) {
-	c, err := ConfigFromYaml([]byte(yml))
-	if err != nil {
-		t.Error("ConfigFromYaml failed to parse yaml for TestSortDependencies")
-	}
+const lockFix = `
+imports:
+- name: github.com/gogo/protobuf
+  revision: 82d16f734d6d871204a3feb1a73cb220cc92574c
+`
 
-	ls := make(Locks, len(c.Imports))
-	for i := 0; i < len(c.Imports); i++ {
-		ls[i] = &Lock{
-			Name:    c.Imports[i].Name,
-			Version: c.Imports[i].Reference,
-		}
-	}
-
-	if ls[2].Name != "github.com/Masterminds/structable" {
-		t.Error("Initial dependencies are out of order prior to sort")
-	}
-
-	sort.Sort(ls)
-
-	if ls[0].Name != "github.com/kylelemons/go-gypsy" ||
-		ls[1].Name != "github.com/Masterminds/convert" ||
-		ls[2].Name != "github.com/Masterminds/cookoo" ||
-		ls[3].Name != "github.com/Masterminds/structable" {
-		t.Error("Sorting of dependencies failed")
-	}
-}
-
-const inputSubpkgYaml = `
+const llockFix = `
 imports:
 - name: github.com/gogo/protobuf
   version: 82d16f734d6d871204a3feb1a73cb220cc92574c
-  subpackages:
-  - plugin/equal
-  - sortkeys
-  - plugin/face
-  - plugin/gostring
-  - vanity
-  - plugin/grpc
-  - plugin/marshalto
-  - plugin/populate
-  - plugin/oneofcheck
-  - plugin/size
-  - plugin/stringer
-  - plugin/defaultcheck
-  - plugin/embedcheck
-  - plugin/description
-  - plugin/enumstringer
-  - gogoproto
-  - plugin/testgen
-  - plugin/union
-  - plugin/unmarshal
-  - protoc-gen-gogo/generator
-  - protoc-gen-gogo/plugin
-  - vanity/command
-  - protoc-gen-gogo/descriptor
-  - proto
-`
-const expectSubpkgYaml = `
-imports:
-- name: github.com/gogo/protobuf
-  version: 82d16f734d6d871204a3feb1a73cb220cc92574c
-  subpackages:
-  - gogoproto
-  - plugin/defaultcheck
-  - plugin/description
-  - plugin/embedcheck
-  - plugin/enumstringer
-  - plugin/equal
-  - plugin/face
-  - plugin/gostring
-  - plugin/grpc
-  - plugin/marshalto
-  - plugin/oneofcheck
-  - plugin/populate
-  - plugin/size
-  - plugin/stringer
-  - plugin/testgen
-  - plugin/union
-  - plugin/unmarshal
-  - proto
-  - protoc-gen-gogo/descriptor
-  - protoc-gen-gogo/generator
-  - protoc-gen-gogo/plugin
-  - sortkeys
-  - vanity
-  - vanity/command
 `
 
-func TestSortSubpackages(t *testing.T) {
-	lf, err := LockfileFromYaml([]byte(inputSubpkgYaml))
+func TestLegacyLockAutoconvert(t *testing.T) {
+	ll, legacy, err := LockfileFromYaml([]byte(llockFix))
 	if err != nil {
-		t.Fatal(err)
+		t.Errorf("LockfileFromYaml failed to detect and autoconvert legacy lock file with err %s", err)
 	}
 
-	out, err := lf.Marshal()
-	if err != nil {
-		t.Fatal(err)
+	if !legacy {
+		t.Error("LockfileFromYaml failed to report autoconversion of legacy lock file")
 	}
 
-	if !strings.Contains(string(out), expectSubpkgYaml) {
-		t.Errorf("Expected %q\n to contain\n%q", string(out), expectSubpkgYaml)
+	if len(ll.Imports) != 1 {
+		t.Errorf("LockfileFromYaml autoconverted with wrong number of import stanzas; expected 1, got %v", len(ll.Imports))
 	}
 }
diff --git a/dependency/analyzer.go b/dependency/analyzer.go
new file mode 100644
index 0000000..1372c1d
--- /dev/null
+++ b/dependency/analyzer.go
@@ -0,0 +1,178 @@
+package dependency
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/Masterminds/glide/cfg"
+	"github.com/Masterminds/glide/gb"
+	"github.com/Masterminds/glide/godep"
+	"github.com/Masterminds/glide/gom"
+	"github.com/Masterminds/glide/gpm"
+	gpath "github.com/Masterminds/glide/path"
+	"github.com/Masterminds/semver"
+	"github.com/sdboyer/gps"
+)
+
+type notApplicable struct{}
+
+func (notApplicable) Error() string {
+	return ""
+}
+
+// Analyzer implements gps.ProjectAnalyzer. We inject the Analyzer into a
+// gps.SourceManager, and it reports manifest and lock information to the
+// SourceManager on request.
+type Analyzer struct{}
+
+func (a Analyzer) Info() (name string, version *semver.Version) {
+	name = "glide"
+	version, _ = semver.NewVersion("0.0.1")
+	return
+}
+
+func (a Analyzer) DeriveManifestAndLock(root string, pn gps.ProjectRoot) (gps.Manifest, gps.Lock, error) {
+	// this check should be unnecessary, but keeping it for now as a canary
+	if _, err := os.Lstat(root); err != nil {
+		return nil, nil, fmt.Errorf("No directory exists at %s; cannot produce ProjectInfo", root)
+	}
+
+	m, l, err := a.lookForGlide(root)
+	if err == nil {
+		// TODO verify project name is same as what SourceManager passed in?
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// The happy path of finding a glide manifest and/or lock file failed. Now,
+	// we begin our descent: we must attempt to divine just exactly *which*
+	// circle of hell we're in.
+
+	// Try godep first
+	m, l, err = a.lookForGodep(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// Next, gpm
+	m, l, err = a.lookForGPM(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// Next, gb
+	m, l, err = a.lookForGb(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// Next, gom
+	m, l, err = a.lookForGom(root)
+	if err == nil {
+		return m, l, nil
+	} else if _, ok := err.(notApplicable); !ok {
+		return nil, nil, err
+	}
+
+	// If none of our parsers matched, but none had actual errors, then we just
+	// go hands-off; gps itself will do the source analysis and use the Any
+	// constraint for all discovered packages.
+	return nil, nil, nil
+}
+
+func (a Analyzer) lookForGlide(root string) (gps.Manifest, gps.Lock, error) {
+	mpath := filepath.Join(root, gpath.GlideFile)
+	if _, err := os.Lstat(mpath); err != nil {
+		return nil, nil, notApplicable{}
+	}
+	// Manifest found, so from here on, we're locked in - a returned error will
+	// make it back to the SourceManager
+
+	yml, err := ioutil.ReadFile(mpath)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Error while reading glide manifest data: %s", root)
+	}
+
+	// We don't care here if it's legacy
+	m, _, err := cfg.ConfigFromYaml(yml)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Error while parsing glide manifest data: %s", root)
+	}
+
+	// Manifest found, read, and parsed - we're on the happy path. Whether we
+	// find a lock or not, we will produce a valid result back to the
+	// SourceManager.
+	lpath := filepath.Join(root, gpath.LockFile)
+	if _, err := os.Lstat(lpath); err != nil {
+		return m, nil, nil
+	}
+
+	yml, err = ioutil.ReadFile(lpath)
+	if err != nil {
+		return m, nil, nil
+	}
+
+	// Again, legacy doesn't matter here
+	l, _, err := cfg.LockfileFromYaml(yml)
+	if err != nil {
+		return m, nil, nil
+	}
+
+	return m, l, nil
+}
+
+func (a Analyzer) lookForGodep(root string) (gps.Manifest, gps.Lock, error) {
+	if !godep.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	d, l, err := godep.AsMetadataPair(root)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &cfg.Config{Name: root, Imports: d}, l, nil
+}
+
+func (a Analyzer) lookForGPM(root string) (gps.Manifest, gps.Lock, error) {
+	if !gpm.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	d, l, err := gpm.AsMetadataPair(root)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &cfg.Config{Name: root, Imports: d}, l, nil
+}
+
+func (a Analyzer) lookForGb(root string) (gps.Manifest, gps.Lock, error) {
+	if !gb.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	d, l, err := gb.AsMetadataPair(root)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return &cfg.Config{Name: root, Imports: d}, l, nil
+}
+
+func (a Analyzer) lookForGom(root string) (gps.Manifest, gps.Lock, error) {
+	if !gom.Has(root) {
+		return nil, nil, notApplicable{}
+	}
+
+	return gom.AsMetadataPair(root)
+}
diff --git a/dependency/resolver.go b/dependency/resolver.go
index bf59c3e..7a70415 100644
--- a/dependency/resolver.go
+++ b/dependency/resolver.go
@@ -272,10 +272,10 @@
 		}
 		pt := strings.TrimPrefix(path, r.basedir+string(os.PathSeparator))
 		pt = strings.TrimSuffix(pt, string(os.PathSeparator))
-		if r.Config.HasExclude(pt) {
-			msg.Debug("Excluding %s", pt)
-			return filepath.SkipDir
-		}
+		//if r.Config.HasExclude(pt) {
+		//msg.Debug("Excluding %s", pt)
+		//return filepath.SkipDir
+		//}
 		if !fi.IsDir() {
 			return nil
 		}
@@ -627,7 +627,7 @@
 	// In addition to generating a list
 	for e := queue.Front(); e != nil; e = e.Next() {
 		t := r.Stripv(e.Value.(string))
-		root, sp := util.NormalizeName(t)
+		root, _ := util.NormalizeName(t)
 
 		if root == r.Config.Name {
 			continue
@@ -641,25 +641,19 @@
 
 		// TODO(mattfarina): Need to eventually support devImport
 		existing := r.Config.Imports.Get(root)
-		if existing == nil && addTest {
-			existing = r.Config.DevImports.Get(root)
-		}
-		if existing != nil {
-			if sp != "" && !existing.HasSubpackage(sp) {
-				existing.Subpackages = append(existing.Subpackages, sp)
-			}
-		} else {
-			newDep := &cfg.Dependency{
-				Name: root,
-			}
-			if sp != "" {
-				newDep.Subpackages = []string{sp}
-			}
-
+		if existing == nil {
 			if addTest {
-				r.Config.DevImports = append(r.Config.DevImports, newDep)
+				existing = r.Config.DevImports.Get(root)
 			} else {
-				r.Config.Imports = append(r.Config.Imports, newDep)
+				newDep := &cfg.Dependency{
+					Name: root,
+				}
+
+				if addTest {
+					r.Config.DevImports = append(r.Config.DevImports, newDep)
+				} else {
+					r.Config.Imports = append(r.Config.Imports, newDep)
+				}
 			}
 		}
 		res = append(res, t)
@@ -732,33 +726,26 @@
 	// In addition to generating a list
 	for e := queue.Front(); e != nil; e = e.Next() {
 		t := strings.TrimPrefix(e.Value.(string), r.VendorDir+string(os.PathSeparator))
-		root, sp := util.NormalizeName(t)
+		root, _ := util.NormalizeName(t)
 
 		if root == r.Config.Name {
 			continue
 		}
 
 		existing := r.Config.Imports.Get(root)
-		if existing == nil && addTest {
-			existing = r.Config.DevImports.Get(root)
-		}
-
-		if existing != nil {
-			if sp != "" && !existing.HasSubpackage(sp) {
-				existing.Subpackages = append(existing.Subpackages, sp)
-			}
-		} else {
-			newDep := &cfg.Dependency{
-				Name: root,
-			}
-			if sp != "" {
-				newDep.Subpackages = []string{sp}
-			}
-
+		if existing == nil {
 			if addTest {
-				r.Config.DevImports = append(r.Config.DevImports, newDep)
+				existing = r.Config.DevImports.Get(root)
 			} else {
-				r.Config.Imports = append(r.Config.Imports, newDep)
+				newDep := &cfg.Dependency{
+					Name: root,
+				}
+
+				if addTest {
+					r.Config.DevImports = append(r.Config.DevImports, newDep)
+				} else {
+					r.Config.Imports = append(r.Config.Imports, newDep)
+				}
 			}
 		}
 		res = append(res, e.Value.(string))
@@ -912,20 +899,8 @@
 func sliceToQueue(deps []*cfg.Dependency, basepath string) *list.List {
 	l := list.New()
 	for _, e := range deps {
-		if len(e.Subpackages) > 0 {
-			for _, v := range e.Subpackages {
-				ip := e.Name
-				if v != "." && v != "" {
-					ip = ip + "/" + v
-				}
-				msg.Debug("Adding local Import %s to queue", ip)
-				l.PushBack(filepath.Join(basepath, filepath.FromSlash(ip)))
-			}
-		} else {
-			msg.Debug("Adding local Import %s to queue", e.Name)
-			l.PushBack(filepath.Join(basepath, filepath.FromSlash(e.Name)))
-		}
-
+		msg.Debug("Adding local Import %s to queue", e.Name)
+		l.PushBack(filepath.Join(basepath, filepath.FromSlash(e.Name)))
 	}
 	return l
 }
diff --git a/gb/gb.go b/gb/gb.go
index 7837a42..aa30a81 100644
--- a/gb/gb.go
+++ b/gb/gb.go
@@ -14,8 +14,8 @@
 // Has returns true if this dir has a GB-flavored manifest file.
 func Has(dir string) bool {
 	path := filepath.Join(dir, "vendor/manifest")
-	_, err := os.Stat(path)
-	return err == nil
+	fi, err := os.Stat(path)
+	return err == nil && !fi.IsDir()
 }
 
 // Parse parses a GB-flavored manifest file.
@@ -44,28 +44,55 @@
 	seen := map[string]bool{}
 
 	for _, d := range man.Dependencies {
-		pkg, sub := util.NormalizeName(d.Importpath)
-		if _, ok := seen[pkg]; ok {
-			if len(sub) == 0 {
-				continue
-			}
-			for _, dep := range buf {
-				if dep.Name == pkg {
-					dep.Subpackages = append(dep.Subpackages, sub)
-				}
-			}
-		} else {
+		// TODO(sdboyer) move to the corresponding SourceManager call...though
+		// that matters less once gps caches these results
+		pkg, _ := util.NormalizeName(d.Importpath)
+		if !seen[pkg] {
 			seen[pkg] = true
 			dep := &cfg.Dependency{
 				Name:       pkg,
-				Reference:  d.Revision,
+				Version:    d.Revision,
 				Repository: d.Repository,
 			}
-			if len(sub) > 0 {
-				dep.Subpackages = []string{sub}
-			}
 			buf = append(buf, dep)
 		}
 	}
 	return buf, nil
 }
+
+// AsMetadataPair attempts to extract manifest and lock data from gb metadata.
+func AsMetadataPair(dir string) (m []*cfg.Dependency, l *cfg.Lockfile, err error) {
+	path := filepath.Join(dir, "vendor/manifest")
+	if _, err = os.Stat(path); err != nil {
+		return
+	}
+
+	file, err := os.Open(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	man, l := Manifest{}, &cfg.Lockfile{}
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&man); err != nil {
+		return
+	}
+
+	seen := map[string]bool{}
+
+	for _, d := range man.Dependencies {
+		pkg, _ := util.NormalizeName(d.Importpath)
+		if !seen[pkg] {
+			seen[pkg] = true
+			dep := &cfg.Dependency{
+				Name:       pkg,
+				Repository: d.Repository,
+			}
+			m = append(m, dep)
+			l.Imports = append(l.Imports, &cfg.Lock{Name: pkg, Revision: d.Revision})
+		}
+	}
+	return
+}
diff --git a/glide.go b/glide.go
index 79616b1..b7b2260 100644
--- a/glide.go
+++ b/glide.go
@@ -1,21 +1,39 @@
-// Glide is a command line utility that manages Go project dependencies.
+// Glide is a command line utility that manages Go project dependencies and
+// your GOPATH.
 //
-// Configuration of where to start is managed via a glide.yaml in the root of a
-// project. The yaml
+// Dependencies are managed via a glide.yaml in the root of a project. The yaml
+//
+// Params:
+// 	- filename (string): The name of the glide YAML file. Default is glide.yaml.
+// 	- project (string): The name of the project. Default is 'main'.
+// file lets you specify projects, versions (tags, branches, or references),
+// and even alias one location in as another one. Aliasing is useful when supporting
+// forks without needing to rewrite the imports in a codebase.
 //
 // A glide.yaml file looks like:
 //
 //		package: github.com/Masterminds/glide
 //		imports:
-//		- package: github.com/Masterminds/cookoo
-//		- package: github.com/kylelemons/go-gypsy
-//		  subpackages:
-//		  - yaml
+//			- package: github.com/Masterminds/cookoo
+//			  vcs: git
+//			  ref: 1.1.0
+//			  subpackages: **
+//			- package: github.com/kylelemons/go-gypsy
+//			  subpackages: yaml
 //
 // Glide puts dependencies in a vendor directory. Go utilities require this to
-// be in your GOPATH. Glide makes this easy.
+// be in your GOPATH. Glide makes this easy. Use the `glide in` command to enter
+// a shell (your default) with the GOPATH set to the project's vendor directory.
+// To leave this shell simply exit it.
 //
-// For more information use the `glide help` command or see https://glide.sh
+// If your .bashrc, .zshrc, or other startup shell sets your GOPATH you may need
+// to optionally set it using something like:
+//
+//		if [ "" = "${GOPATH}" ]; then
+//		  export GOPATH="/some/dir"
+//		fi
+//
+// For more information use the `glide help` command or see https://github.com/Masterminds/glide
 package main
 
 import (
@@ -34,23 +52,25 @@
 	"os"
 )
 
-var version = "0.12.0-dev"
+var version = "0.11.0-dev"
 
-const usage = `Vendor Package Management for your Go projects.
+const usage = `The lightweight vendor package manager for your Go projects.
 
-   Each project should have a 'glide.yaml' file in the project directory. Files
-   look something like this:
+Each project should have a 'glide.yaml' file in the project directory. Files
+look something like this:
 
-       package: github.com/Masterminds/glide
-       imports:
-       - package: github.com/Masterminds/cookoo
-         version: 1.1.0
-       - package: github.com/kylelemons/go-gypsy
-         subpackages:
-         - yaml
+	package: github.com/Masterminds/glide
+	imports:
+		- package: github.com/Masterminds/cookoo
+		  vcs: git
+		  ref: 1.1.0
+		  subpackages: **
+		- package: github.com/kylelemons/go-gypsy
+		  subpackages: yaml
+			flatten: true
 
-   For more details on the 'glide.yaml' files see the documentation at
-   https://glide.sh/docs/glide.yaml
+NOTE: As of Glide 0.5, the commands 'into', 'gopath', 'status', and 'env'
+no longer exist.
 `
 
 // VendorDir default vendor directory name
@@ -72,6 +92,10 @@
 			Usage: "Quiet (no info or debug messages)",
 		},
 		cli.BoolFlag{
+			Name:  "verbose",
+			Usage: "Print detailed informational messages",
+		},
+		cli.BoolFlag{
 			Name:  "debug",
 			Usage: "Print debug verbose informational messages",
 		},
@@ -81,12 +105,6 @@
 			Usage:  "The location of Glide files",
 			EnvVar: "GLIDE_HOME",
 		},
-		cli.StringFlag{
-			Name:   "tmp",
-			Value:  "",
-			Usage:  "The temp directory to use. Defaults to systems temp",
-			EnvVar: "GLIDE_TMP",
-		},
 		cli.BoolFlag{
 			Name:  "no-color",
 			Usage: "Turn off colored output for log messages",
@@ -137,9 +155,8 @@
 					Usage: "Disable interactive prompts.",
 				},
 			},
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.Create(".", c.Bool("skip-import"), c.Bool("non-interactive"))
-				return nil
 			},
 		},
 		{
@@ -149,9 +166,8 @@
 			Description: `Glide will analyze a projects glide.yaml file and the imported
 		projects to find ways the glide.yaml file can potentially be improved. It
 		will then interactively make suggestions that you can skip or accept.`,
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.ConfigWizard(".")
-				return nil
 			},
 		},
 		{
@@ -160,129 +176,71 @@
 			Description: `Gets one or more package (like 'go get') and then adds that file
    to the glide.yaml file. Multiple package names can be specified on one line.
 
-       $ glide get github.com/Masterminds/cookoo/web
+   	$ glide get github.com/Masterminds/cookoo/web
 
    The above will install the project github.com/Masterminds/cookoo and add
    the subpackage 'web'.
 
    If a fetched dependency has a glide.yaml file, configuration from Godep,
-   GPM, GOM, or GB Glide that configuration will be used to find the dependencies
+   GPM, or GB Glide that configuration will be used to find the dependencies
    and versions to fetch. If those are not available the dependent packages will
    be fetched as either a version specified elsewhere or the latest version.
 
    When adding a new dependency Glide will perform an update to work out the
-   the versions for the dependencies of this dependency (transitive ones). This
-   will generate an updated glide.lock file with specific locked versions to use.
+   versions to use from the dependency tree. This will generate an updated
+   glide.lock file with specific locked versions to use.
 
-   The '--strip-vendor' flag will remove any nested 'vendor' folders and
+   If you are storing the outside dependencies in your version control system
+   (VCS), also known as vendoring, there are a few flags that may be useful.
+   The '--update-vendored' flag will cause Glide to update packages when VCS
+   information is unavailable. This can be used with the '--strip-vcs' flag which
+   will strip VCS data found in the vendor directory. This is useful for
+   removing VCS data from transitive dependencies and initial setups. The
+   '--strip-vendor' flag will remove any nested 'vendor' folders and
    'Godeps/_workspace' folders after an update (along with undoing any Godep
    import rewriting). Note, The Godeps specific functionality is deprecated and
    will be removed when most Godeps users have migrated to using the vendor
    folder.`,
 			Flags: []cli.Flag{
-				cli.BoolFlag{
-					Name:  "test",
-					Usage: "Add test dependencies.",
-				},
-				cli.BoolFlag{
-					Name:  "insecure",
-					Usage: "Use http:// rather than https:// to retrieve pacakges.",
-				},
-				cli.BoolFlag{
-					Name:  "no-recursive, quick",
-					Usage: "Disable updating dependencies' dependencies.",
-				},
-				cli.BoolFlag{
-					Name:  "force",
-					Usage: "If there was a change in the repo or VCS switch to new one. Warning, changes will be lost.",
-				},
-				cli.BoolFlag{
-					Name:  "all-dependencies",
-					Usage: "This will resolve all dependencies for all packages, not just those directly used.",
-				},
-				cli.BoolFlag{
-					Name:   "update-vendored, u",
-					Usage:  "Update vendored packages (without local VCS repo). Warning, changes will be lost.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "cache",
-					Usage:  "When downloading dependencies attempt to cache them.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "cache-gopath",
-					Usage:  "When downloading dependencies attempt to put them in the GOPATH, too.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "use-gopath",
-					Usage:  "Copy dependencies from the GOPATH if they exist there.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:  "resolve-current",
-					Usage: "Resolve dependencies for only the current system rather than all build modes.",
-				},
-				cli.BoolFlag{
-					Name:   "strip-vcs, s",
-					Usage:  "Removes version control metadata (e.g, .git directory) from the vendor folder.",
-					Hidden: true,
-				},
+				//cli.BoolFlag{
+				//Name:  "insecure",
+				//Usage: "Use http:// rather than https:// to retrieve packages.",
+				//},
+				//cli.BoolFlag{
+				//Name:  "cache-gopath",
+				//Usage: "When downloading dependencies attempt to put them in the GOPATH, too.",
+				//},
+				//cli.BoolFlag{
+				//Name:  "resolve-current",
+				//Usage: "Resolve dependencies for only the current system rather than all build modes.",
+				//},
 				cli.BoolFlag{
 					Name:  "strip-vendor, v",
-					Usage: "Removes nested vendor and Godeps/_workspace directories.",
+					Usage: "Removes nested vendor and Godeps/_workspace directories. Requires --strip-vcs.",
 				},
 				cli.BoolFlag{
 					Name:  "non-interactive",
 					Usage: "Disable interactive prompts.",
 				},
-				cli.BoolFlag{
-					Name:  "skip-test",
-					Usage: "Resolve dependencies in test files.",
-				},
 			},
-			Action: func(c *cli.Context) error {
-				if c.Bool("delete") {
-					msg.Warn("The --delete flag is deprecated. This now works by default.")
-				}
-				if c.Bool("update-vendored") {
-					msg.Warn("The --update-vendored flag is deprecated. This now works by default.")
-				}
-				if c.String("file") != "" {
-					msg.Warn("The --flag flag is deprecated.")
-				}
-				if c.Bool("cache") {
-					msg.Warn("The --cache flag is deprecated. This now works by default.")
-				}
-				if c.Bool("cache-gopath") {
-					msg.Warn("The --cache-gopath flag is deprecated.")
-				}
-				if c.Bool("use-gopath") {
-					msg.Warn("The --use-gopath flag is deprecated. Please see overrides.")
-				}
-				if c.Bool("strip-vcs") {
-					msg.Warn("The --strip-vcs flag is deprecated. This now works by default.")
-				}
-
+			Action: func(c *cli.Context) {
 				if len(c.Args()) < 1 {
 					fmt.Println("Oops! Package name is required.")
 					os.Exit(1)
 				}
 
-				if c.Bool("resolve-current") {
-					util.ResolveCurrent = true
-					msg.Warn("Only resolving dependencies for the current OS/Arch")
-				}
+				//if c.Bool("resolve-current") {
+				//util.ResolveCurrent = true
+				//msg.Warn("Only resolving dependencies for the current OS/Arch")
+				//}
 
 				inst := repo.NewInstaller()
-				inst.Force = c.Bool("force")
-				inst.ResolveAllFiles = c.Bool("all-dependencies")
-				inst.ResolveTest = !c.Bool("skip-test")
+				inst.Home = gpath.Home()
+				//inst.UseCacheGopath = c.Bool("cache-gopath")
+				//inst.ResolveAllFiles = c.Bool("all-dependencies")
 				packages := []string(c.Args())
-				insecure := c.Bool("insecure")
-				action.Get(packages, inst, insecure, c.Bool("no-recursive"), c.Bool("strip-vendor"), c.Bool("non-interactive"), c.Bool("test"))
-				return nil
+				//insecure := c.Bool("insecure")
+				action.Get(packages, inst, c.Bool("strip-vendor"), c.Bool("non-interactive"))
 			},
 		},
 		{
@@ -290,14 +248,18 @@
 			ShortName: "rm",
 			Usage:     "Remove a package from the glide.yaml file, and regenerate the lock file.",
 			Description: `This takes one or more package names, and removes references from the glide.yaml file.
-   This will rebuild the glide lock file re-resolving the depencies.`,
+   This will rebuild the glide lock file with the following constraints:
+
+   - Dependencies are re-negotiated. Any that are no longer used are left out of the lock.
+   - Minor version re-negotiation is performed on remaining dependencies.
+   - No updates are performed. You may want to run 'glide up' to accomplish that.
 			Flags: []cli.Flag{
 				cli.BoolFlag{
 					Name:  "delete,d",
 					Usage: "Also delete from vendor/ any packages that are no longer used.",
 				},
 			},
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				if len(c.Args()) < 1 {
 					fmt.Println("Oops! At least one package name is required.")
 					os.Exit(1)
@@ -311,7 +273,6 @@
 				inst.Force = c.Bool("force")
 				packages := []string(c.Args())
 				action.Remove(packages, inst)
-				return nil
 			},
 		},
 		{
@@ -327,9 +288,8 @@
 							Usage: "Save all of the discovered dependencies to a Glide YAML file.",
 						},
 					},
-					Action: func(c *cli.Context) error {
+					Action: func(c *cli.Context) {
 						action.ImportGodep(c.String("file"))
-						return nil
 					},
 				},
 				{
@@ -341,9 +301,8 @@
 							Usage: "Save all of the discovered dependencies to a Glide YAML file.",
 						},
 					},
-					Action: func(c *cli.Context) error {
+					Action: func(c *cli.Context) {
 						action.ImportGPM(c.String("file"))
-						return nil
 					},
 				},
 				{
@@ -355,9 +314,8 @@
 							Usage: "Save all of the discovered dependencies to a Glide YAML file.",
 						},
 					},
-					Action: func(c *cli.Context) error {
+					Action: func(c *cli.Context) {
 						action.ImportGB(c.String("file"))
-						return nil
 					},
 				},
 				{
@@ -369,9 +327,8 @@
 							Usage: "Save all of the discovered dependencies to a Glide YAML file.",
 						},
 					},
-					Action: func(c *cli.Context) error {
+					Action: func(c *cli.Context) {
 						action.ImportGom(c.String("file"))
-						return nil
 					},
 				},
 			},
@@ -380,9 +337,8 @@
 			Name:        "name",
 			Usage:       "Print the name of this project.",
 			Description: `Read the glide.yaml file and print the name given on the 'package' line.`,
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.Name()
-				return nil
 			},
 		},
 		{
@@ -404,71 +360,40 @@
 					Usage: "Specify this to prevent nv from append '/...' to all directories.",
 				},
 			},
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.NoVendor(c.String("dir"), true, !c.Bool("no-subdir"))
-				return nil
 			},
 		},
 		{
 			Name:  "rebuild",
 			Usage: "Rebuild ('go build') the dependencies",
-			Description: `(Deprecated) This rebuilds the packages' '.a' files. On some systems
+			Description: `This rebuilds the packages' '.a' files. On some systems
 	this can improve performance on subsequent 'go run' and 'go build' calls.`,
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.Rebuild()
-				return nil
 			},
 		},
 		{
 			Name:      "install",
 			ShortName: "i",
 			Usage:     "Install a project's dependencies",
-			Description: `This uses the native VCS of each packages to install
-   the appropriate version. There are two ways a projects dependencies can
-   be installed. When there is a glide.yaml file defining the dependencies but
-   no lock file (glide.lock) the dependencies are installed using the "update"
-   command and a glide.lock file is generated pinning all dependencies. If a
-   glide.lock file is already present the dependencies are installed or updated
-   from the lock file.`,
+			Description: `This uses the native VCS of each package to install the appropriate version into
+   the vendor directory adjacent to glide.yaml. Installs are always performed
+   from a lock file, which contains pinned, immutable versions. If no lock file
+   exists, glide will compute one first, then run the install (unless
+   --install-only is passed).`,
 			Flags: []cli.Flag{
 				cli.BoolFlag{
-					Name:   "delete",
-					Usage:  "Delete vendor packages not specified in config.",
-					Hidden: true,
+					Name:  "install-only",
+					Usage: "Install only if a glide.lock file already exists; otherwise, an error is thrown.",
 				},
 				cli.BoolFlag{
-					Name:  "force",
-					Usage: "If there was a change in the repo or VCS switch to new one. Warning: changes will be lost.",
+					Name:  "synced-only",
+					Usage: "Install only if the glide.lock file is in sync with the glide.yaml, otherwise exit with an error. (Implies --install-only)",
 				},
 				cli.BoolFlag{
-					Name:   "update-vendored, u",
-					Usage:  "Update vendored packages (without local VCS repo). Warning: this may destroy local modifications to vendor/.",
-					Hidden: true,
-				},
-				cli.StringFlag{
-					Name:   "file, f",
-					Usage:  "Save all of the discovered dependencies to a Glide YAML file. (DEPRECATED: This has no impact.)",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "cache",
-					Usage:  "When downloading dependencies attempt to cache them.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "cache-gopath",
-					Usage:  "When downloading dependencies attempt to put them in the GOPATH, too.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "use-gopath",
-					Usage:  "Copy dependencies from the GOPATH if they exist there.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "strip-vcs, s",
-					Usage:  "Removes version control metadata (e.g, .git directory) from the vendor folder.",
-					Hidden: true,
+					Name:  "cache-gopath",
+					Usage: "When downloading dependencies attempt to put them in the GOPATH, too.",
 				},
 				cli.BoolFlag{
 					Name:  "strip-vendor, v",
@@ -479,116 +404,69 @@
 					Usage: "Resolve dependencies in test files.",
 				},
 			},
-			Action: func(c *cli.Context) error {
-				if c.Bool("delete") {
-					msg.Warn("The --delete flag is deprecated. This now works by default.")
-				}
-				if c.Bool("update-vendored") {
-					msg.Warn("The --update-vendored flag is deprecated. This now works by default.")
-				}
-				if c.String("file") != "" {
-					msg.Warn("The --flag flag is deprecated.")
-				}
-				if c.Bool("cache") {
-					msg.Warn("The --cache flag is deprecated. This now works by default.")
-				}
-				if c.Bool("cache-gopath") {
-					msg.Warn("The --cache-gopath flag is deprecated.")
-				}
-				if c.Bool("use-gopath") {
-					msg.Warn("The --use-gopath flag is deprecated. Please see overrides.")
-				}
-				if c.Bool("strip-vcs") {
-					msg.Warn("The --strip-vcs flag is deprecated. This now works by default.")
-				}
-
+			Action: func(c *cli.Context) {
 				installer := repo.NewInstaller()
-				installer.Force = c.Bool("force")
-				installer.Home = c.GlobalString("home")
-				installer.ResolveTest = !c.Bool("skip-test")
+				installer.UseCacheGopath = c.Bool("cache-gopath")
+				installer.Home = gpath.Home()
 
-				action.Install(installer, c.Bool("strip-vendor"))
-				return nil
+				action.Install(installer, c.Bool("install-only"), c.Bool("synced-only"), c.Bool("strip-vendor"))
 			},
 		},
 		{
 			Name:      "update",
 			ShortName: "up",
 			Usage:     "Update a project's dependencies",
-			Description: `This updates the dependencies by scanning the codebase
-   to determine the needed dependencies and fetching them following the rules
-   in the glide.yaml file. When no rules exist the tip of the default branch
-   is used. For more details see https://glide.sh/docs/glide.yaml
+			Description: `This uses the native VCS of each package to try to
+   pull the most applicable updates. If no arguments are provided, then glide
+   will attempt to update all dependencies. If package names are provided, then
+   glide will attempt to find a solution where only those packages are changed.
 
    If a dependency has a glide.yaml file, update will read that file and
-   use the information contained there. Those dependencies are maintained in
-   the top level 'vendor/' directory. 'vendor/foo/bar' will have its
+   update those dependencies accordingly. Those dependencies are maintained in
+   the top level 'vendor/' directory. 'vendor/foo/bar' will have its
    dependencies stored in 'vendor/'. This behavior can be disabled with
    '--no-recursive'. When this behavior is skipped a glide.lock file is not
    generated because the full dependency tree cannot be known.
 
-   Glide will also import Godep, GB, GOM, and GPM files as it finds them in dependencies.
+   Glide will also import Godep, GB, and GPM files as it finds them in dependencies.
    It will create a glide.yaml file from the Godeps data, and then update. This
    has no effect if '--no-recursive' is set.
 
-   The '--strip-vendor' flag will remove any nested 'vendor' folders and
+   If you are storing the outside dependencies in your version control system
+   (VCS), also known as vendoring, there are a few flags that may be useful.
+   The '--update-vendored' flag will cause Glide to update packages when VCS
+   information is unavailable. This can be used with the '--strip-vcs' flag which
+   will strip VCS data found in the vendor directory. This is useful for
+   removing VCS data from transitive dependencies and initial setups. The
+   '--strip-vendor' flag will remove any nested 'vendor' folders and
    'Godeps/_workspace' folders after an update (along with undoing any Godep
-   import rewriting). Note, the Godeps specific functionality is deprecated and
+   import rewriting). Note, The Godeps specific functionality is deprecated and
    will be removed when most Godeps users have migrated to using the vendor
-   folder.`,
+   folder.
+
+   Note, Glide detects vendored dependencies. With the '--update-vendored' flag
+   Glide will update vendored dependencies leaving them in a vendored state.
+   Tertiary dependencies will not be vendored automatically unless the
+   '--strip-vcs' flag is used along with it.
+
+   By default, packages that are discovered are considered transient, and are
+   not stored in the glide.yaml file. The --file=NAME.yaml flag allows you
+   to save the discovered dependencies to a YAML file.
+   `,
 			Flags: []cli.Flag{
 				cli.BoolFlag{
-					Name:   "delete",
-					Usage:  "Delete vendor packages not specified in config.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:  "no-recursive, quick",
-					Usage: "Disable updating dependencies' dependencies. Only update things in glide.yaml.",
-				},
-				cli.BoolFlag{
-					Name:  "force",
-					Usage: "If there was a change in the repo or VCS switch to new one. Warning, changes will be lost.",
-				},
-				cli.BoolFlag{
 					Name:  "all-dependencies",
 					Usage: "This will resolve all dependencies for all packages, not just those directly used.",
 				},
 				cli.BoolFlag{
-					Name:   "update-vendored, u",
-					Usage:  "Update vendored packages (without local VCS repo). Warning, changes will be lost.",
-					Hidden: true,
-				},
-				cli.StringFlag{
-					Name:   "file, f",
-					Usage:  "Save all of the discovered dependencies to a Glide YAML file.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "cache",
-					Usage:  "When downloading dependencies attempt to cache them.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "cache-gopath",
-					Usage:  "When downloading dependencies attempt to put them in the GOPATH, too.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
-					Name:   "use-gopath",
-					Usage:  "Copy dependencies from the GOPATH if they exist there.",
-					Hidden: true,
+					Name:  "cache-gopath",
+					Usage: "When downloading dependencies attempt to put them in the GOPATH, too.",
 				},
 				cli.BoolFlag{
 					Name:  "resolve-current",
 					Usage: "Resolve dependencies for only the current system rather than all build modes.",
 				},
 				cli.BoolFlag{
-					Name:   "strip-vcs, s",
-					Usage:  "Removes version control metadata (e.g, .git directory) from the vendor folder.",
-					Hidden: true,
-				},
-				cli.BoolFlag{
 					Name:  "strip-vendor, v",
 					Usage: "Removes nested vendor and Godeps/_workspace directories.",
 				},
@@ -597,60 +475,31 @@
 					Usage: "Resolve dependencies in test files.",
 				},
 			},
-			Action: func(c *cli.Context) error {
-				if c.Bool("delete") {
-					msg.Warn("The --delete flag is deprecated. This now works by default.")
-				}
-				if c.Bool("update-vendored") {
-					msg.Warn("The --update-vendored flag is deprecated. This now works by default.")
-				}
-				if c.String("file") != "" {
-					msg.Warn("The --flag flag is deprecated.")
-				}
-				if c.Bool("cache") {
-					msg.Warn("The --cache flag is deprecated. This now works by default.")
-				}
-				if c.Bool("cache-gopath") {
-					msg.Warn("The --cache-gopath flag is deprecated.")
-				}
-				if c.Bool("use-gopath") {
-					msg.Warn("The --use-gopath flag is deprecated. Please see overrides.")
-				}
-				if c.Bool("strip-vcs") {
-					msg.Warn("The --strip-vcs flag is deprecated. This now works by default.")
-				}
-
+			Action: func(c *cli.Context) {
 				if c.Bool("resolve-current") {
 					util.ResolveCurrent = true
 					msg.Warn("Only resolving dependencies for the current OS/Arch")
 				}
 
 				installer := repo.NewInstaller()
-				installer.Force = c.Bool("force")
+				installer.UseCacheGopath = c.Bool("cache-gopath")
 				installer.ResolveAllFiles = c.Bool("all-dependencies")
-				installer.Home = c.GlobalString("home")
-				installer.ResolveTest = !c.Bool("skip-test")
+				installer.Home = gpath.Home()
 
-				action.Update(installer, c.Bool("no-recursive"), c.Bool("strip-vendor"))
-
-				return nil
+				action.Update(installer, c.Bool("strip-vendor"), []string(c.Args()))
 			},
 		},
 		{
 			Name:  "tree",
-			Usage: "(Deprecated) Tree prints the dependencies of this project as a tree.",
+			Usage: "Tree prints the dependencies of this project as a tree.",
 			Description: `This scans a project's source files and builds a tree
    representation of the import graph.
 
    It ignores testdata/ and directories that begin with . or _. Packages in
    vendor/ are only included if they are referenced by the main project or
-   one of its dependencies.
-
-   Note, for large projects this can display a large list tens of thousands of
-   lines long.`,
-			Action: func(c *cli.Context) error {
+   one of its dependencies.`,
+			Action: func(c *cli.Context) {
 				action.Tree(".", false)
-				return nil
 			},
 		},
 		{
@@ -663,9 +512,8 @@
 
    Directories that begin with . or _ are ignored, as are testdata directories. Packages in
    vendor are only included if they are used by the project.`,
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.List(".", true, c.String("output"))
-				return nil
 			},
 			Flags: []cli.Flag{
 				cli.StringFlag{
@@ -709,114 +557,27 @@
 
        glide info -f "%n - %d - %h - %l"
           prints 'foo - Some example description - https://example.com - MIT'`,
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				if c.IsSet("format") {
 					action.Info(c.String("format"))
 				} else {
 					cli.ShowCommandHelp(c, c.Command.Name)
 				}
-				return nil
 			},
 		},
 		{
 			Name:      "cache-clear",
 			ShortName: "cc",
 			Usage:     "Clears the Glide cache.",
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.CacheClear()
-				return nil
 			},
 		},
 		{
 			Name:  "about",
 			Usage: "Learn about Glide",
-			Action: func(c *cli.Context) error {
+			Action: func(c *cli.Context) {
 				action.About()
-				return nil
-			},
-		},
-		{
-			Name:  "mirror",
-			Usage: "Manage mirrors",
-			Description: `Mirrors provide the ability to replace a repo location with
-   another location that's a mirror of the original. This is useful when you want
-   to have a cache for your continuous integration (CI) system or if you want to
-   work on a dependency in a local location.
-
-   The mirrors are stored in an mirrors.yaml file in your GLIDE_HOME.
-
-   The three commands to manager mirrors are 'list', 'set', and 'remove'.
-
-   Use 'set' in the form:
-
-       glide mirror set [original] [replacement]
-
-   or
-
-       glide mirror set [original] [replacement] --vcs [type]
-
-   for example,
-
-       glide mirror set https://github.com/example/foo https://git.example.com/example/foo.git
-
-       glide mirror set https://github.com/example/foo file:///path/to/local/repo --vcs git
-
-   Use 'remove' in the form:
-
-       glide mirror remove [original]
-
-   for example,
-
-       glide mirror remove https://github.com/example/foo`,
-			Subcommands: []cli.Command{
-				{
-					Name:  "list",
-					Usage: "List the current mirrors",
-					Action: func(c *cli.Context) error {
-						return action.MirrorsList()
-					},
-				},
-				{
-					Name:  "set",
-					Usage: "Set a mirror. This overwrites an existing entry if one exists",
-					Description: `Use 'set' in the form:
-
-       glide mirror set [original] [replacement]
-
-   or
-
-       glide mirror set [original] [replacement] --vcs [type]
-
-   for example,
-
-       glide mirror set https://github.com/example/foo https://git.example.com/example/foo.git
-
-       glide mirror set https://github.com/example/foo file:///path/to/local/repo --vcs git`,
-					Flags: []cli.Flag{
-						cli.StringFlag{
-							Name:  "vcs",
-							Usage: "The VCS type to use. Autodiscovery is attempted when not supplied. Can be one of git, svn, bzr, or hg",
-						},
-					},
-					Action: func(c *cli.Context) error {
-						return action.MirrorsSet(c.Args().Get(0), c.Args().Get(1), c.String("vcs"))
-					},
-				},
-				{
-					Name:      "remove",
-					ShortName: "rm",
-					Usage:     "Remove an mirror",
-					Description: `Use 'remove' in the form:
-
-       glide mirror remove [original]
-
-   for example,
-
-       glide mirror remove https://github.com/example/foo`,
-					Action: func(c *cli.Context) error {
-						return action.MirrorsRemove(c.Args().Get(0))
-					},
-				},
 			},
 		},
 	}
@@ -828,11 +589,11 @@
 // so it can be used by any Glide command.
 func startup(c *cli.Context) error {
 	action.Debug(c.Bool("debug"))
+	action.Verbose(c.Bool("verbose"))
 	action.NoColor(c.Bool("no-color"))
 	action.Quiet(c.Bool("quiet"))
 	action.Init(c.String("yaml"), c.String("home"))
 	action.EnsureGoVendor()
-	gpath.Tmp = c.String("tmp")
 	return nil
 }
 
diff --git a/glide.lock b/glide.lock
index 2d3fc9c..fff4efe 100644
--- a/glide.lock
+++ b/glide.lock
@@ -1,12 +1,24 @@
-hash: 67c5571c33bfcb663d32d2b40b9ce1f2a05a3fa2e9f442077277c2782195729c
-updated: 2016-08-11T14:22:17.773372627-04:00
+hash: e12d18f87508f2f53e2981b52a02ed23d135f59ab90f3afca813727c0685eec0
+updated: 2016-09-27T23:50:39.744887915-04:00
 imports:
+- name: github.com/armon/go-radix
+  branch: master
+  revision: 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
 - name: github.com/codegangsta/cli
-  version: 1efa31f08b9333f1bd4882d61f9d668a70cd902e
+  version: v1.14.0
+  revision: 71f57d300dd6a780ac1856c005c4b518cfd498ec
 - name: github.com/Masterminds/semver
-  version: 8d0431362b544d1a3536cca26684828866a7de09
+  branch: 2.x
+  revision: b3ef6b1808e9889dfb8767ce7068db923a3d07de
 - name: github.com/Masterminds/vcs
-  version: fbe9fb6ad5b5f35b3e82a7c21123cfc526cbf895
+  version: v1.8.0
+  revision: fbe9fb6ad5b5f35b3e82a7c21123cfc526cbf895
+- name: github.com/sdboyer/gps
+  branch: master
+  revision: 44255835bcf52ec1dfacf207dbbb4c1bffe378d0
+- name: github.com/termie/go-shutil
+  revision: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
 - name: gopkg.in/yaml.v2
-  version: e4d366fc3c7938e2958e662b4258c7a89e1f0e3e
+  branch: v2
+  revision: 31c299268d302dd0aa9a0dcf765a3d58971ac83f
 testImports: []
diff --git a/glide.yaml b/glide.yaml
index f293f35..feacb64 100644
--- a/glide.yaml
+++ b/glide.yaml
@@ -8,11 +8,14 @@
 - name: Matt Farina
   email: matt@mattfarina.com
   homepage: https://www.mattfarina.com/
-import:
+dependencies:
 - package: gopkg.in/yaml.v2
+  branch: v2
 - package: github.com/Masterminds/vcs
   version: ^1.8.0
 - package: github.com/codegangsta/cli
   version: ^1.16.0
 - package: github.com/Masterminds/semver
-  version: ^1.1.1
+  branch: 2.x
+- package: github.com/sdboyer/gps
+  branch: master
diff --git a/godep/godep.go b/godep/godep.go
index 71291b3..1a358aa 100644
--- a/godep/godep.go
+++ b/godep/godep.go
@@ -7,7 +7,6 @@
 	"encoding/json"
 	"os"
 	"path/filepath"
-	"strings"
 
 	"github.com/Masterminds/glide/cfg"
 	"github.com/Masterminds/glide/msg"
@@ -75,23 +74,10 @@
 
 	seen := map[string]bool{}
 	for _, d := range godeps.Deps {
-		pkg, sub := util.NormalizeName(d.ImportPath)
-		if _, ok := seen[pkg]; ok {
-			if len(sub) == 0 {
-				continue
-			}
-			// Modify existing dep with additional subpackages.
-			for _, dep := range buf {
-				if dep.Name == pkg {
-					dep.Subpackages = append(dep.Subpackages, sub)
-				}
-			}
-		} else {
+		pkg, _ := util.NormalizeName(d.ImportPath)
+		if !seen[pkg] {
 			seen[pkg] = true
-			dep := &cfg.Dependency{Name: pkg, Reference: d.Rev}
-			if sub != "" {
-				dep.Subpackages = []string{sub}
-			}
+			dep := &cfg.Dependency{Name: pkg, Version: d.Rev}
 			buf = append(buf, dep)
 		}
 	}
@@ -99,28 +85,48 @@
 	return buf, nil
 }
 
-// RemoveGodepSubpackages strips subpackages from a cfg.Config dependencies that
-// contain "Godeps/_workspace/src" as part of the path.
-func RemoveGodepSubpackages(c *cfg.Config) *cfg.Config {
-	for _, d := range c.Imports {
-		n := []string{}
-		for _, v := range d.Subpackages {
-			if !strings.HasPrefix(v, "Godeps/_workspace/src") {
-				n = append(n, v)
-			}
-		}
-		d.Subpackages = n
+func AsMetadataPair(dir string) ([]*cfg.Dependency, *cfg.Lockfile, error) {
+	path := filepath.Join(dir, "Godeps/Godeps.json")
+	if _, err := os.Stat(path); err != nil {
+		return nil, nil, err
 	}
 
-	for _, d := range c.DevImports {
-		n := []string{}
-		for _, v := range d.Subpackages {
-			if !strings.HasPrefix(v, "Godeps/_workspace/src") {
-				n = append(n, v)
-			}
-		}
-		d.Subpackages = n
+	var m []*cfg.Dependency
+	l := &cfg.Lockfile{}
+	godeps := &Godeps{}
+
+	// Get a handle to the file.
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer file.Close()
+
+	dec := json.NewDecoder(file)
+	if err := dec.Decode(godeps); err != nil {
+		return nil, nil, err
 	}
 
-	return c
+	seen := map[string]bool{}
+	for _, d := range godeps.Deps {
+		pkg, _ := util.NormalizeName(d.ImportPath)
+		if _, ok := seen[pkg]; !ok {
+			seen[pkg] = true
+
+			// Place no real *actual* constraint on the project; instead, we
+			// rely on gps using the 'preferred' version mechanism by
+			// working from the lock file. Without this, users would end up with
+			// the same mind-numbing diamond dep problems as currently exist.
+			// This approach does make for an uncomfortably wide possibility
+			// space where deps aren't getting what they expect, but that's
+			// better than just having the solver give up completely.
+			m = append(m, &cfg.Dependency{Name: pkg})
+			l.Imports = append(l.Imports, &cfg.Lock{Name: pkg, Revision: d.Rev})
+
+			// TODO this fails to differentiate between dev and non-dev imports;
+			// need static analysis for that
+		}
+	}
+
+	return m, l, nil
 }
diff --git a/gom/gom.go b/gom/gom.go
index 51910b0..a33601d 100644
--- a/gom/gom.go
+++ b/gom/gom.go
@@ -9,6 +9,7 @@
 	"github.com/Masterminds/glide/msg"
 	gpath "github.com/Masterminds/glide/path"
 	"github.com/Masterminds/glide/util"
+	"github.com/sdboyer/gps"
 )
 
 // Has returns true if this dir has a Gomfile.
@@ -55,33 +56,21 @@
 			}
 		}
 
-		pkg, sub := util.NormalizeName(gom.name)
+		pkg, _ := util.NormalizeName(gom.name)
 
 		dep := &cfg.Dependency{
 			Name: pkg,
 		}
 
-		if len(sub) > 0 {
-			dep.Subpackages = []string{sub}
-		}
-
 		// Check for a specific revision
 		if val, ok := gom.options["commit"]; ok {
-			dep.Reference = val.(string)
+			dep.Version = val.(string)
 		}
 		if val, ok := gom.options["tag"]; ok {
-			dep.Reference = val.(string)
+			dep.Version = val.(string)
 		}
 		if val, ok := gom.options["branch"]; ok {
-			dep.Reference = val.(string)
-		}
-
-		// Parse goos and goarch
-		if val, ok := gom.options["goos"]; ok {
-			dep.Os = toStringSlice(val)
-		}
-		if val, ok := gom.options["goarch"]; ok {
-			dep.Arch = toStringSlice(val)
+			dep.Branch = val.(string)
 		}
 
 		buf = append(buf, dep)
@@ -90,6 +79,98 @@
 	return buf, nil
 }
 
+// AsMetadataPair attempts to extract manifest and lock data from gom metadata.
+func AsMetadataPair(dir string) (gps.Manifest, gps.Lock, error) {
+	path := filepath.Join(dir, "Gomfile")
+	if _, err := os.Stat(path); err != nil {
+		return nil, nil, err
+	}
+
+	goms, err := parseGomfile(path)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var l gps.SimpleLock
+	m := gps.SimpleManifest{}
+
+	for _, gom := range goms {
+		// Do we need to skip this dependency?
+		if val, ok := gom.options["skipdep"]; ok && val.(string) == "true" {
+			continue
+		}
+
+		// Check for custom cloning command
+		if _, ok := gom.options["command"]; ok {
+			return nil, nil, errors.New("Glide does not support custom Gomfile commands")
+		}
+
+		// Check for groups/environments
+		if val, ok := gom.options["group"]; ok {
+			groups := toStringSlice(val)
+			if !stringsContain(groups, "development") && !stringsContain(groups, "production") {
+				// right now we only support development and production
+				continue
+			}
+		}
+
+		pkg, _ := util.NormalizeName(gom.name)
+
+		dep := gps.ProjectConstraint{
+			Ident: gps.ProjectIdentifier{
+				ProjectRoot: gps.ProjectRoot(pkg),
+			},
+		}
+
+		// Our order of preference for things to put in the manifest are
+		//   - Semver
+		//   - Version
+		//   - Branch
+		//   - Revision
+
+		var v gps.UnpairedVersion
+		if val, ok := gom.options["tag"]; ok {
+			body := val.(string)
+			v = gps.NewVersion(body)
+			c, err := gps.NewSemverConstraint(body)
+			if err != nil {
+				c = gps.NewVersion(body)
+			}
+			dep.Constraint = c
+		} else if val, ok := gom.options["branch"]; ok {
+			body := val.(string)
+			v = gps.NewBranch(body)
+			dep.Constraint = gps.NewBranch(body)
+		}
+
+		id := gps.ProjectIdentifier{
+			ProjectRoot: gps.ProjectRoot(dir),
+		}
+		var version gps.Version
+		if val, ok := gom.options["commit"]; ok {
+			body := val.(string)
+			if v != nil {
+				version = v.Is(gps.Revision(body))
+			} else {
+				// As with the other third-party system integrations, we're
+				// going to choose not to put revisions into a manifest, even
+				// though gom has a lot more information than most and the
+				// argument could be made for it.
+				dep.Constraint = gps.Any()
+				version = gps.Revision(body)
+			}
+		} else if v != nil {
+			// This is kinda uncomfortable - lock w/no immut - but OK
+			version = v
+		}
+		l = append(l, gps.NewLockedProject(id, version, nil))
+
+		// TODO We ignore GOOS, GOARCH for now
+	}
+
+	return m, l, nil
+}
+
 func stringsContain(v []string, key string) bool {
 	for _, s := range v {
 		if s == key {
diff --git a/gpm/gpm.go b/gpm/gpm.go
index e58a81d..55bf6d9 100644
--- a/gpm/gpm.go
+++ b/gpm/gpm.go
@@ -5,6 +5,7 @@
 
 import (
 	"bufio"
+	"fmt"
 	"os"
 	"path/filepath"
 	"strings"
@@ -45,7 +46,7 @@
 		if ok {
 			dep := &cfg.Dependency{Name: parts[0]}
 			if len(parts) > 1 {
-				dep.Reference = parts[1]
+				dep.Version = parts[1]
 			}
 			buf = append(buf, dep)
 		}
@@ -58,6 +59,41 @@
 	return buf, nil
 }
 
+func AsMetadataPair(dir string) ([]*cfg.Dependency, *cfg.Lockfile, error) {
+	path := filepath.Join(dir, "Godeps")
+	if i, err := os.Stat(path); err != nil {
+		return nil, nil, err
+	} else if i.IsDir() {
+		return nil, nil, fmt.Errorf("Found a Godeps dir, rather than it being a file")
+	}
+
+	var m []*cfg.Dependency
+	l := &cfg.Lockfile{}
+
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	scanner := bufio.NewScanner(file)
+	for scanner.Scan() {
+		parts, ok := parseGodepsLine(scanner.Text())
+		if ok {
+			// Place no actual constraint on the project; rely instead on
+			// gps's 'preferred version' reasoning from deps' lock
+			// files...if we have one at all.
+			if len(parts) > 1 {
+				l.Imports = append(l.Imports, &cfg.Lock{Name: parts[0], Version: parts[1]})
+			}
+			m = append(m, &cfg.Dependency{Name: parts[0]})
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, nil, err
+	}
+
+	return m, l, nil
+}
+
 func parseGodepsLine(line string) ([]string, bool) {
 	line = strings.TrimSpace(line)
 
diff --git a/importer/importer.go b/importer/importer.go
index 9d5d2ac..9da292b 100644
--- a/importer/importer.go
+++ b/importer/importer.go
@@ -44,7 +44,7 @@
 		if err != nil {
 			return false, []*cfg.Dependency{}, err
 		}
-		conf, err := cfg.ConfigFromYaml(yml)
+		conf, _, err := cfg.ConfigFromYaml(yml)
 		if err != nil {
 			return false, []*cfg.Dependency{}, err
 		}
diff --git a/repo/installer.go b/repo/installer.go
index 8f02cc3..8a7e97c 100644
--- a/repo/installer.go
+++ b/repo/installer.go
@@ -2,13 +2,10 @@
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"
 	"sync"
-	"syscall"
 	"time"
 
 	"github.com/Masterminds/glide/cache"
@@ -35,6 +32,20 @@
 	// Vendor contains the path to put the vendor packages
 	Vendor string
 
+	// Use a cache
+	UseCache bool
+	// Use Gopath to cache
+	UseCacheGopath bool
+	// Use Gopath as a source to read from
+	UseGopath bool
+
+	// UpdateVendored instructs the environment to update in a way that is friendly
+	// to packages that have been "vendored in" (e.g. are copies of source, not repos)
+	UpdateVendored bool
+
+	// DeleteUnused deletes packages that are unused, but found in the vendor dir.
+	DeleteUnused bool
+
 	// ResolveAllFiles enables a resolver that will examine the dependencies
 	// of every file of every package, rather than only following imported
 	// packages.
@@ -71,6 +82,11 @@
 // Install installs the dependencies from a Lockfile.
 func (i *Installer) Install(lock *cfg.Lockfile, conf *cfg.Config) (*cfg.Config, error) {
 
+	cwd, err := gpath.Vendor()
+	if err != nil {
+		return conf, err
+	}
+
 	// Create a config setup based on the Lockfile data to process with
 	// existing commands.
 	newConf := &cfg.Config{}
@@ -95,13 +111,9 @@
 
 	msg.Info("Downloading dependencies. Please wait...")
 
-	err := LazyConcurrentUpdate(newConf.Imports, i, newConf)
-	if err != nil {
-		return newConf, err
-	}
-	err = LazyConcurrentUpdate(newConf.DevImports, i, newConf)
-
-	return newConf, err
+	LazyConcurrentUpdate(newConf.Imports, cwd, i, newConf)
+	LazyConcurrentUpdate(newConf.DevImports, cwd, i, newConf)
+	return newConf, nil
 }
 
 // Checkout reads the config file and checks out all dependencies mentioned there.
@@ -110,14 +122,16 @@
 // vendor directory based on changed config.
 func (i *Installer) Checkout(conf *cfg.Config) error {
 
+	dest := i.VendorPath()
+
 	msg.Info("Downloading dependencies. Please wait...")
 
-	if err := ConcurrentUpdate(conf.Imports, i, conf); err != nil {
+	if err := ConcurrentUpdate(conf.Imports, dest, i, conf); err != nil {
 		return err
 	}
 
 	if i.ResolveTest {
-		return ConcurrentUpdate(conf.DevImports, i, conf)
+		return ConcurrentUpdate(conf.DevImports, dest, i, conf)
 	}
 
 	return nil
@@ -132,22 +146,30 @@
 // In other words, all versions in the Lockfile will be empty.
 func (i *Installer) Update(conf *cfg.Config) error {
 	base := "."
+	vpath := i.VendorPath()
 
 	ic := newImportCache()
 
 	m := &MissingPackageHandler{
-		home:    i.Home,
-		force:   i.Force,
-		Config:  conf,
-		Use:     ic,
-		updated: i.Updated,
+		destination: vpath,
+
+		cache:          i.UseCache,
+		cacheGopath:    i.UseCacheGopath,
+		useGopath:      i.UseGopath,
+		home:           i.Home,
+		force:          i.Force,
+		updateVendored: i.UpdateVendored,
+		Config:         conf,
+		Use:            ic,
+		updated:        i.Updated,
 	}
 
 	v := &VersionHandler{
-		Use:       ic,
-		Imported:  make(map[string]bool),
-		Conflicts: make(map[string]bool),
-		Config:    conf,
+		Destination: vpath,
+		Use:         ic,
+		Imported:    make(map[string]bool),
+		Conflicts:   make(map[string]bool),
+		Config:      conf,
 	}
 
 	// Update imports
@@ -173,19 +195,13 @@
 		if conf.HasIgnore(n) {
 			continue
 		}
-		rt, sub := util.NormalizeName(n)
-		if sub == "" {
-			sub = "."
-		}
+		rt, _ := util.NormalizeName(n)
 		d := deps.Get(rt)
 		if d == nil {
 			nd := &cfg.Dependency{
-				Name:        rt,
-				Subpackages: []string{sub},
+				Name: rt,
 			}
 			deps = append(deps, nd)
-		} else if !d.HasSubpackage(sub) {
-			d.Subpackages = append(d.Subpackages, sub)
 		}
 	}
 	if i.ResolveTest {
@@ -194,22 +210,16 @@
 			if conf.HasIgnore(n) {
 				continue
 			}
-			rt, sub := util.NormalizeName(n)
-			if sub == "" {
-				sub = "."
-			}
+			rt, _ := util.NormalizeName(n)
 			d := deps.Get(rt)
 			if d == nil {
 				d = tdeps.Get(rt)
 			}
 			if d == nil {
 				nd := &cfg.Dependency{
-					Name:        rt,
-					Subpackages: []string{sub},
+					Name: rt,
 				}
 				tdeps = append(tdeps, nd)
-			} else if !d.HasSubpackage(sub) {
-				d.Subpackages = append(d.Subpackages, sub)
 			}
 		}
 	}
@@ -229,13 +239,13 @@
 
 	msg.Info("Downloading dependencies. Please wait...")
 
-	err = ConcurrentUpdate(conf.Imports, i, conf)
+	err = ConcurrentUpdate(conf.Imports, vpath, i, conf)
 	if err != nil {
 		return err
 	}
 
 	if i.ResolveTest {
-		err = ConcurrentUpdate(conf.DevImports, i, conf)
+		err = ConcurrentUpdate(conf.DevImports, vpath, i, conf)
 		if err != nil {
 			return err
 		}
@@ -244,163 +254,19 @@
 	return nil
 }
 
-// Export from the cache to the vendor directory
-func (i *Installer) Export(conf *cfg.Config) error {
-	tempDir, err := ioutil.TempDir(gpath.Tmp, "glide-vendor")
-	if err != nil {
-		return err
-	}
-	defer func() {
-		err = os.RemoveAll(tempDir)
-		if err != nil {
-			msg.Err(err.Error())
-		}
-	}()
-
-	vp := filepath.Join(tempDir, "vendor")
-	err = os.MkdirAll(vp, 0755)
-
-	msg.Info("Exporting resolved dependencies...")
-	done := make(chan struct{}, concurrentWorkers)
-	in := make(chan *cfg.Dependency, concurrentWorkers)
-	var wg sync.WaitGroup
-	var lock sync.Mutex
-	var returnErr error
-
-	for ii := 0; ii < concurrentWorkers; ii++ {
-		go func(ch <-chan *cfg.Dependency) {
-			for {
-				select {
-				case dep := <-ch:
-					loc := dep.Remote()
-					key, err := cache.Key(loc)
-					if err != nil {
-						msg.Die(err.Error())
-					}
-					cache.Lock(key)
-
-					cdir := filepath.Join(cache.Location(), "src", key)
-					repo, err := dep.GetRepo(cdir)
-					if err != nil {
-						msg.Die(err.Error())
-					}
-					msg.Info("--> Exporting %s", dep.Name)
-					if err := repo.ExportDir(filepath.Join(vp, filepath.ToSlash(dep.Name))); err != nil {
-						msg.Err("Export failed for %s: %s\n", dep.Name, err)
-						// Capture the error while making sure the concurrent
-						// operations don't step on each other.
-						lock.Lock()
-						if returnErr == nil {
-							returnErr = err
-						} else {
-							returnErr = cli.NewMultiError(returnErr, err)
-						}
-						lock.Unlock()
-					}
-					cache.Unlock(key)
-					wg.Done()
-				case <-done:
-					return
-				}
-			}
-		}(in)
-	}
-
-	for _, dep := range conf.Imports {
-		if !conf.HasIgnore(dep.Name) {
-			err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
-			if err != nil {
-				lock.Lock()
-				if returnErr == nil {
-					returnErr = err
-				} else {
-					returnErr = cli.NewMultiError(returnErr, err)
-				}
-				lock.Unlock()
-			}
-			wg.Add(1)
-			in <- dep
-		}
-	}
-
-	if i.ResolveTest {
-		for _, dep := range conf.DevImports {
-			if !conf.HasIgnore(dep.Name) {
-				err = os.MkdirAll(filepath.Join(vp, filepath.ToSlash(dep.Name)), 0755)
-				if err != nil {
-					lock.Lock()
-					if returnErr == nil {
-						returnErr = err
-					} else {
-						returnErr = cli.NewMultiError(returnErr, err)
-					}
-					lock.Unlock()
-				}
-				wg.Add(1)
-				in <- dep
-			}
-		}
-	}
-
-	wg.Wait()
-
-	// Close goroutines setting the version
-	for ii := 0; ii < concurrentWorkers; ii++ {
-		done <- struct{}{}
-	}
-
-	if returnErr != nil {
-		return returnErr
-	}
-
-	msg.Info("Replacing existing vendor dependencies")
-	err = os.RemoveAll(i.VendorPath())
-	if err != nil {
-		return err
-	}
-
-	err = os.Rename(vp, i.VendorPath())
-
-	if err != nil {
-		// When there are different physical devices we cannot rename cross device.
-		// Instead we copy.
-		switch terr := err.(type) {
-		case *os.LinkError:
-			// syscall.EXDEV is the common name for the cross device link error
-			// which has varying output text across different operating systems.
-			if terr.Err == syscall.EXDEV {
-				msg.Debug("Cross link err, trying manual copy: %s", err)
-				return gpath.CopyDir(vp, i.VendorPath())
-			} else if runtime.GOOS == "windows" {
-				// In windows it can drop down to an operating system call that
-				// returns an operating system error with a different number and
-				// message. Checking for that as a fall back.
-				noerr, ok := terr.Err.(syscall.Errno)
-				// 0x11 (ERROR_NOT_SAME_DEVICE) is the windows error.
-				// See https://msdn.microsoft.com/en-us/library/cc231199.aspx
-				if ok && noerr == 0x11 {
-					msg.Debug("Cross link err on Windows, trying manual copy: %s", err)
-					return gpath.CopyDir(vp, i.VendorPath())
-				}
-			}
-		}
-	}
-
-	return err
-
-}
-
 // List resolves the complete dependency tree and returns a list of dependencies.
 func (i *Installer) List(conf *cfg.Config) []*cfg.Dependency {
 	base := "."
+	vpath := i.VendorPath()
 
 	ic := newImportCache()
 
 	v := &VersionHandler{
-		Use:       ic,
-		Imported:  make(map[string]bool),
-		Conflicts: make(map[string]bool),
-		Config:    conf,
+		Destination: vpath,
+		Use:         ic,
+		Imported:    make(map[string]bool),
+		Conflicts:   make(map[string]bool),
+		Config:      conf,
 	}
 
 	// Update imports
@@ -433,17 +299,11 @@
 // LazyConcurrentUpdate updates only deps that are not already checkout out at the right version.
 //
 // This is only safe when updating from a lock file.
-func LazyConcurrentUpdate(deps []*cfg.Dependency, i *Installer, c *cfg.Config) error {
+func LazyConcurrentUpdate(deps []*cfg.Dependency, cwd string, i *Installer, c *cfg.Config) error {
 
 	newDeps := []*cfg.Dependency{}
 	for _, dep := range deps {
-
-		key, err := cache.Key(dep.Remote())
-		if err != nil {
-			newDeps = append(newDeps, dep)
-			continue
-		}
-		destPath := filepath.Join(cache.Location(), "src", key)
+		destPath := filepath.Join(i.VendorPath(), dep.Name)
 
 		// Get a VCS object for this directory
 		repo, err := dep.GetRepo(destPath)
@@ -457,26 +317,27 @@
 			newDeps = append(newDeps, dep)
 			continue
 		}
-		if dep.Reference != "" {
-			ci, err := repo.CommitInfo(dep.Reference)
-			if err == nil && ci.Commit == dep.Reference {
-				msg.Info("--> Found desired version locally %s %s!", dep.Name, dep.Reference)
-				continue
-			}
+
+		// TODO(sdboyer) it wants a rev only here, but the lock conversion to a
+		// dep will prefer to put in the version, not the rev. ...fortunately,
+		// this whole func should be removed soon(?)
+		if ver == dep.Version {
+			msg.Info("--> Found desired version %s %s!", dep.Name, dep.GetConstraint())
+			continue
 		}
 
-		msg.Debug("--> Queue %s for update (%s != %s).", dep.Name, ver, dep.Reference)
+		msg.Debug("--> Queue %s for update (%s != %s).", dep.Name, ver, dep.GetConstraint())
 		newDeps = append(newDeps, dep)
 	}
 	if len(newDeps) > 0 {
-		return ConcurrentUpdate(newDeps, i, c)
+		return ConcurrentUpdate(newDeps, cwd, i, c)
 	}
 
 	return nil
 }
 
 // ConcurrentUpdate takes a list of dependencies and updates in parallel.
-func ConcurrentUpdate(deps []*cfg.Dependency, i *Installer, c *cfg.Config) error {
+func ConcurrentUpdate(deps []*cfg.Dependency, cwd string, i *Installer, c *cfg.Config) error {
 	done := make(chan struct{}, concurrentWorkers)
 	in := make(chan *cfg.Dependency, concurrentWorkers)
 	var wg sync.WaitGroup
@@ -488,13 +349,19 @@
 			for {
 				select {
 				case dep := <-ch:
-					loc := dep.Remote()
+					var loc string
+					if dep.Repository != "" {
+						loc = dep.Repository
+					} else {
+						loc = "https://" + dep.Name
+					}
 					key, err := cache.Key(loc)
 					if err != nil {
 						msg.Die(err.Error())
 					}
 					cache.Lock(key)
-					if err := VcsUpdate(dep, i.Force, i.Updated); err != nil {
+					dest := filepath.Join(i.VendorPath(), dep.Name)
+					if err := VcsUpdate(dep, dest, i.Home, i.UseCache, i.UseCacheGopath, i.UseGopath, i.Force, i.UpdateVendored, i.Updated); err != nil {
 						msg.Err("Update failed for %s: %s\n", dep.Name, err)
 						// Capture the error while making sure the concurrent
 						// operations don't step on each other.
@@ -560,82 +427,122 @@
 //
 // When a package is found on the GOPATH, this notifies the user.
 type MissingPackageHandler struct {
-	home    string
-	force   bool
-	Config  *cfg.Config
-	Use     *importCache
-	updated *UpdateTracker
+	destination                                          string
+	home                                                 string
+	cache, cacheGopath, useGopath, force, updateVendored bool
+	Config                                               *cfg.Config
+	Use                                                  *importCache
+	updated                                              *UpdateTracker
 }
 
-// NotFound attempts to retrieve a package when not found in the local cache
+// NotFound attempts to retrieve a package when not found in the local vendor/
 // folder. It will attempt to get it from the remote location info.
 func (m *MissingPackageHandler) NotFound(pkg string, addTest bool) (bool, error) {
-	err := m.fetchToCache(pkg, addTest)
-	if err != nil {
-		return false, err
+	root := util.GetRootFromPackage(pkg)
+	// Skip any references to the root package.
+	if root == m.Config.Name {
+		return false, nil
 	}
 
-	return true, err
+	dest := filepath.Join(m.destination, root)
+
+	// This package may have been placed on the list to look for when it wasn't
+	// downloaded but it has since been downloaded before coming to this entry.
+	if _, err := os.Stat(dest); err == nil {
+		// Make sure the location contains files. It may be an empty directory.
+		empty, err := gpath.IsDirectoryEmpty(dest)
+		if err != nil {
+			return false, err
+		}
+		if empty {
+			msg.Warn("%s is an existing location with no files. Fetching a new copy of the dependency.", dest)
+			msg.Debug("Removing empty directory %s", dest)
+			err := os.RemoveAll(dest)
+			if err != nil {
+				msg.Debug("Installer error removing directory %s: %s", dest, err)
+				return false, err
+			}
+		} else {
+			msg.Debug("Found %s", dest)
+			return true, nil
+		}
+	}
+
+	msg.Info("Fetching %s into %s", pkg, m.destination)
+
+	d := m.Config.Imports.Get(root)
+	if d == nil && addTest {
+		d = m.Config.DevImports.Get(root)
+	}
+
+	// If the dependency is nil it means the Config doesn't yet know about it.
+	if d == nil {
+		d, _ = m.Use.Get(root)
+		// We don't know about this dependency so we create a basic instance.
+		if d == nil {
+			d = &cfg.Dependency{Name: root}
+		}
+		if addTest {
+			m.Config.DevImports = append(m.Config.DevImports, d)
+		} else {
+			m.Config.Imports = append(m.Config.Imports, d)
+		}
+	}
+	if err := VcsGet(d, dest, m.home, m.cache, m.cacheGopath, m.useGopath); err != nil {
+		return false, err
+	}
+	return true, nil
 }
 
 // OnGopath will either copy a package, already found in the GOPATH, to the
 // vendor/ directory or download it from the internet. This is dependent if
 // useGopath on the installer is set to true to copy from the GOPATH.
 func (m *MissingPackageHandler) OnGopath(pkg string, addTest bool) (bool, error) {
-
-	err := m.fetchToCache(pkg, addTest)
-	if err != nil {
-		return false, err
+	// If useGopath is false, we fall back to the strategy of fetching from
+	// remote.
+	if !m.useGopath {
+		return m.NotFound(pkg, addTest)
 	}
 
-	return true, err
+	root := util.GetRootFromPackage(pkg)
+
+	// Skip any references to the root package.
+	if root == m.Config.Name {
+		return false, nil
+	}
+
+	msg.Info("Copying package %s from the GOPATH.", pkg)
+	dest := filepath.Join(m.destination, pkg)
+	// Find package on Gopath
+	for _, gp := range gpath.Gopaths() {
+		src := filepath.Join(gp, pkg)
+		// FIXME: Should probably check if src is a dir or symlink.
+		if _, err := os.Stat(src); err == nil {
+			if err := os.MkdirAll(dest, os.ModeDir|0755); err != nil {
+				return false, err
+			}
+			if err := gpath.CopyDir(src, dest); err != nil {
+				return false, err
+			}
+			return true, nil
+		}
+	}
+
+	msg.Err("Could not locate %s on the GOPATH, though it was found before.", pkg)
+	return false, nil
 }
 
 // InVendor updates a package in the vendor/ directory to make sure the latest
 // is available.
 func (m *MissingPackageHandler) InVendor(pkg string, addTest bool) error {
-	return m.fetchToCache(pkg, addTest)
-}
-
-// PkgPath resolves the location on the filesystem where the package should be.
-// This handles making sure to use the cache location.
-func (m *MissingPackageHandler) PkgPath(pkg string) string {
-	root, sub := util.NormalizeName(pkg)
-
-	// For the parent applications source skip the cache.
-	if root == m.Config.Name {
-		pth := gpath.Basepath()
-		return filepath.Join(pth, filepath.FromSlash(sub))
-	}
-
-	d := m.Config.Imports.Get(root)
-	if d == nil {
-		d = m.Config.DevImports.Get(root)
-	}
-
-	if d == nil {
-		d, _ = m.Use.Get(root)
-
-		if d == nil {
-			d = &cfg.Dependency{Name: root}
-		}
-	}
-
-	key, err := cache.Key(d.Remote())
-	if err != nil {
-		msg.Die("Error generating cache key for %s", d.Name)
-	}
-
-	return filepath.Join(cache.Location(), "src", key, filepath.FromSlash(sub))
-}
-
-func (m *MissingPackageHandler) fetchToCache(pkg string, addTest bool) error {
 	root := util.GetRootFromPackage(pkg)
 	// Skip any references to the root package.
 	if root == m.Config.Name {
 		return nil
 	}
 
+	dest := filepath.Join(m.destination, root)
+
 	d := m.Config.Imports.Get(root)
 	if d == nil && addTest {
 		d = m.Config.DevImports.Get(root)
@@ -656,7 +563,15 @@
 		}
 	}
 
-	return VcsUpdate(d, m.force, m.updated)
+	if err := VcsUpdate(d, dest, m.home, m.cache, m.cacheGopath, m.useGopath, m.force, m.updateVendored, m.updated); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MissingPackageHandler) PkgPath(pkg string) string {
+	return pkg
 }
 
 // VersionHandler handles setting the proper version in the VCS.
@@ -669,6 +584,9 @@
 	// Cache if importing scan has already occurred here.
 	Imported map[string]bool
 
+	// Where the packages exist to set the version on.
+	Destination string
+
 	Config *cfg.Config
 
 	// There's a problem where many sub-packages have been asked to set a version
@@ -691,14 +609,14 @@
 	// Should we look in places other than the root of the project?
 	if d.Imported[root] == false {
 		d.Imported[root] = true
-		p := d.pkgPath(root)
+		p := filepath.Join(d.Destination, root)
 		f, deps, err := importer.Import(p)
 		if f && err == nil {
 			for _, dep := range deps {
 
 				// The fist one wins. Would something smater than this be better?
 				exists, _ := d.Use.Get(dep.Name)
-				if exists == nil && (dep.Reference != "" || dep.Repository != "") {
+				if exists == nil && (dep.IsUnconstrained() || dep.Repository != "") {
 					d.Use.Add(dep.Name, dep, root)
 				}
 			}
@@ -733,8 +651,8 @@
 			// There are import chains (because the import tree is resolved
 			// before the test tree) that can cause this.
 			tempD := d.Config.DevImports.Get(root)
-			if tempD.Reference != v.Reference {
-				msg.Warn("Using import %s (version %s) for test instead of testImport (version %s).", v.Name, v.Reference, tempD.Reference)
+			if !tempD.ConstraintsEq(*v) {
+				msg.Warn("Using import %s (version %s) for test instead of testImport (version %s).", v.Name, v.GetConstraint(), tempD.GetConstraint())
 			}
 			// TODO(mattfarina): Note repo difference in a warning.
 		}
@@ -742,13 +660,14 @@
 
 	dep, req := d.Use.Get(root)
 	if dep != nil && v != nil {
-		if v.Reference == "" && dep.Reference != "" {
-			v.Reference = dep.Reference
+		if v.IsUnconstrained() && !dep.IsUnconstrained() {
+			v.Version = dep.Version
+			v.Branch = dep.Branch
 			// Clear the pin, if set, so the new version can be used.
-			v.Pin = ""
+			//v.Pin = ""
 			dep = v
-		} else if v.Reference != "" && dep.Reference != "" && v.Reference != dep.Reference {
-			dest := d.pkgPath(pkg)
+		} else if !dep.ConstraintsEq(*v) && !dep.IsUnconstrained() && !v.IsUnconstrained() { // constraints are not eq and non-empty
+			dest := filepath.Join(d.Destination, filepath.FromSlash(v.Name))
 			dep = determineDependency(v, dep, dest, req)
 		} else {
 			dep = v
@@ -766,13 +685,10 @@
 		}
 	} else {
 		// If we've gotten here we don't have any depenency objects.
-		r, sp := util.NormalizeName(pkg)
+		r, _ := util.NormalizeName(pkg)
 		dep = &cfg.Dependency{
 			Name: r,
 		}
-		if sp != "" {
-			dep.Subpackages = []string{sp}
-		}
 		if addTest {
 			d.Config.DevImports = append(d.Config.DevImports, dep)
 		} else {
@@ -780,153 +696,127 @@
 		}
 	}
 
-	err := VcsVersion(dep)
+	err := VcsVersion(dep, d.Destination)
 	if err != nil {
-		msg.Warn("Unable to set version on %s to %s. Err: %s", root, dep.Reference, err)
+		msg.Warn("Unable to set version on %s to %s. Err: %s", root, dep.GetConstraint(), err)
 		e = err
 	}
 
 	return
 }
 
-func (d *VersionHandler) pkgPath(pkg string) string {
-	root, sub := util.NormalizeName(pkg)
-
-	// For the parent applications source skip the cache.
-	if root == d.Config.Name {
-		pth := gpath.Basepath()
-		return filepath.Join(pth, filepath.FromSlash(sub))
-	}
-
-	dep := d.Config.Imports.Get(root)
-	if dep == nil {
-		dep = d.Config.DevImports.Get(root)
-	}
-
-	if dep == nil {
-		dep, _ = d.Use.Get(root)
-
-		if dep == nil {
-			dep = &cfg.Dependency{Name: root}
-		}
-	}
-
-	key, err := cache.Key(dep.Remote())
-	if err != nil {
-		msg.Die("Error generating cache key for %s", dep.Name)
-	}
-
-	return filepath.Join(cache.Location(), "src", key, filepath.FromSlash(sub))
-}
-
 func determineDependency(v, dep *cfg.Dependency, dest, req string) *cfg.Dependency {
 	repo, err := v.GetRepo(dest)
 	if err != nil {
 		singleWarn("Unable to access repo for %s\n", v.Name)
-		singleInfo("Keeping %s %s", v.Name, v.Reference)
+		singleInfo("Keeping %s %s", v.Name, v.GetConstraint())
 		return v
 	}
 
-	vIsRef := repo.IsReference(v.Reference)
-	depIsRef := repo.IsReference(dep.Reference)
+	dc, vc := v.GetConstraint(), dep.GetConstraint()
+	vIsRef := repo.IsReference(vc.String())
+	depIsRef := repo.IsReference(dc.String())
 
 	// Both are references and they are different ones.
 	if vIsRef && depIsRef {
-		singleWarn("Conflict: %s rev is currently %s, but %s wants %s\n", v.Name, v.Reference, req, dep.Reference)
+		singleWarn("Conflict: %s rev is currently %s, but %s wants %s\n", v.Name, vc, req, dep.GetConstraint())
 
 		displayCommitInfo(repo, v)
 		displayCommitInfo(repo, dep)
 
-		singleInfo("Keeping %s %s", v.Name, v.Reference)
+		singleInfo("Keeping %s %s", v.Name, vc)
 		return v
 	} else if vIsRef {
 		// The current one is a reference and the suggestion is a SemVer constraint.
-		con, err := semver.NewConstraint(dep.Reference)
+		con, err := semver.NewConstraint(dep.Version)
 		if err != nil {
-			singleWarn("Version issue for %s: '%s' is neither a reference or semantic version constraint\n", dep.Name, dep.Reference)
-			singleInfo("Keeping %s %s", v.Name, v.Reference)
+			singleWarn("Version issue for %s: '%s' is neither a reference or semantic version constraint\n", dep.Name, dep.GetConstraint())
+			singleInfo("Keeping %s %s", v.Name, vc)
 			return v
 		}
 
-		ver, err := semver.NewVersion(v.Reference)
+		ver, err := semver.NewVersion(v.Version)
 		if err != nil {
 			// The existing version is not a semantic version.
-			singleWarn("Conflict: %s version is %s, but also asked for %s\n", v.Name, v.Reference, dep.Reference)
+			singleWarn("Conflict: %s version is %s, but also asked for %s\n", v.Name, vc, dc)
 			displayCommitInfo(repo, v)
-			singleInfo("Keeping %s %s", v.Name, v.Reference)
+			singleInfo("Keeping %s %s", v.Name, vc)
 			return v
 		}
 
-		if con.Check(ver) {
-			singleInfo("Keeping %s %s because it fits constraint '%s'", v.Name, v.Reference, dep.Reference)
+		if con.Matches(ver) == nil {
+			singleInfo("Keeping %s %s because it fits constraint '%s'", v.Name, vc, dc)
 			return v
 		}
-		singleWarn("Conflict: %s version is %s but does not meet constraint '%s'\n", v.Name, v.Reference, dep.Reference)
-		singleInfo("Keeping %s %s", v.Name, v.Reference)
+		singleWarn("Conflict: %s version is %s but does not meet constraint '%s'\n", v.Name, vc, dc)
+		singleInfo("Keeping %s %s", v.Name, vc)
 		return v
 	} else if depIsRef {
 
-		con, err := semver.NewConstraint(v.Reference)
+		con, err := semver.NewConstraint(v.Version)
 		if err != nil {
-			singleWarn("Version issue for %s: '%s' is neither a reference or semantic version constraint\n", v.Name, v.Reference)
-			singleInfo("Keeping %s %s", v.Name, v.Reference)
+			singleWarn("Version issue for %s: '%s' is neither a reference or semantic version constraint\n", v.Name, vc)
+			singleInfo("Keeping %s %s", v.Name, vc)
 			return v
 		}
 
-		ver, err := semver.NewVersion(dep.Reference)
+		ver, err := semver.NewVersion(dep.Version)
 		if err != nil {
-			singleWarn("Conflict: %s version is %s, but also asked for %s\n", v.Name, v.Reference, dep.Reference)
+			singleWarn("Conflict: %s version is %s, but also asked for %s\n", v.Name, vc, dc)
 			displayCommitInfo(repo, dep)
-			singleInfo("Keeping %s %s", v.Name, v.Reference)
+			singleInfo("Keeping %s %s", v.Name, vc)
 			return v
 		}
 
-		if con.Check(ver) {
-			v.Reference = dep.Reference
-			singleInfo("Using %s %s because it fits constraint '%s'", v.Name, v.Reference, v.Reference)
+		if con.Matches(ver) == nil {
+			singleInfo("Using %s %s because it fits constraint '%s'", v.Name, dc, vc)
+			v.Version = dep.Version
+			v.Branch = dep.Branch
+			vc = dc
 			return v
 		}
-		singleWarn("Conflict: %s semantic version constraint is %s but '%s' does not meet the constraint\n", v.Name, v.Reference, v.Reference)
-		singleInfo("Keeping %s %s", v.Name, v.Reference)
+		singleWarn("Conflict: %s semantic version constraint is %s but '%s' does not meet the constraint\n", v.Name, dc, vc)
+		singleInfo("Keeping %s %s", v.Name, dc)
 		return v
 	}
 	// Neither is a vcs reference and both could be semantic version
 	// constraints that are different.
 
-	_, err = semver.NewConstraint(dep.Reference)
+	_, err = semver.NewConstraint(dc.String())
 	if err != nil {
-		// dd.Reference is not a reference or a valid constraint.
-		singleWarn("Version %s %s is not a reference or valid semantic version constraint\n", dep.Name, dep.Reference)
-		singleInfo("Keeping %s %s", v.Name, v.Reference)
+		// dd.Constraint is not a reference or a valid constraint.
+		singleWarn("Version %s %s is not a reference or valid semantic version constraint\n", dep.Name, dc)
+		singleInfo("Keeping %s %s", v.Name, vc)
 		return v
 	}
 
-	_, err = semver.NewConstraint(v.Reference)
+	_, err = semver.NewConstraint(vc.String())
 	if err != nil {
-		// existing.Reference is not a reference or a valid constraint.
+		// existing.Constraint is not a reference or a valid constraint.
 		// We really should never end up here.
-		singleWarn("Version %s %s is not a reference or valid semantic version constraint\n", v.Name, v.Reference)
+		singleWarn("Version %s %s is not a reference or valid semantic version constraint\n", v.Name, vc)
 
-		v.Reference = dep.Reference
-		v.Pin = ""
-		singleInfo("Using %s %s because it is a valid version", v.Name, v.Reference)
+		v.Version = dep.Version
+		v.Branch = dep.Branch
+		//v.Pin = ""
+		singleInfo("Using %s %s because it is a valid version", v.Name, dc)
 		return v
 	}
 
 	// Both versions are constraints. Try to merge them.
 	// If either comparison has an || skip merging. That's complicated.
-	ddor := strings.Index(dep.Reference, "||")
-	eor := strings.Index(v.Reference, "||")
+	ddor := strings.Index(dep.Version, "||")
+	eor := strings.Index(v.Version, "||")
 	if ddor == -1 && eor == -1 {
 		// Add the comparisons together.
-		newRef := v.Reference + ", " + dep.Reference
-		v.Reference = newRef
-		v.Pin = ""
-		singleInfo("Combining %s semantic version constraints %s and %s", v.Name, v.Reference, dep.Reference)
+		// TODO(sdboyer) this all just reeeeeally needs to go
+		v.Version = v.GetConstraint().Intersect(dep.GetConstraint()).String()
+		//v.Pin = ""
+		singleInfo("Combining %s semantic version constraints %s and %s", v.Name, vc, dc)
 		return v
 	}
-	singleWarn("Conflict: %s version is %s, but also asked for %s\n", v.Name, v.Reference, dep.Reference)
-	singleInfo("Keeping %s %s", v.Name, v.Reference)
+	singleWarn("Conflict: %s version is %s, but also asked for %s\n", v.Name, vc, dc)
+	singleInfo("Keeping %s %s", v.Name, vc)
 	return v
 }
 
@@ -984,13 +874,13 @@
 	displayCommitInfoPrefix + "- subject (first line): %s\n"
 
 func displayCommitInfo(repo vcs.Repo, dep *cfg.Dependency) {
-	c, err := repo.CommitInfo(dep.Reference)
-	ref := dep.Reference
+	ref := dep.GetConstraint().String()
+	c, err := repo.CommitInfo(ref)
 
 	if err == nil {
 		tgs, err2 := repo.TagsFromCommit(c.Commit)
 		if err2 == nil && len(tgs) > 0 {
-			if tgs[0] != dep.Reference {
+			if tgs[0] != ref {
 				ref = ref + " (" + tgs[0] + ")"
 			}
 		}
diff --git a/repo/set_reference.go b/repo/set_reference.go
index d9899ec..ff3d4c3 100644
--- a/repo/set_reference.go
+++ b/repo/set_reference.go
@@ -6,6 +6,7 @@
 	"github.com/Masterminds/glide/cache"
 	"github.com/Masterminds/glide/cfg"
 	"github.com/Masterminds/glide/msg"
+	gpath "github.com/Masterminds/glide/path"
 	"github.com/codegangsta/cli"
 )
 
@@ -13,6 +14,11 @@
 // a project.
 func SetReference(conf *cfg.Config, resolveTest bool) error {
 
+	cwd, err := gpath.Vendor()
+	if err != nil {
+		return err
+	}
+
 	if len(conf.Imports) == 0 && len(conf.DevImports) == 0 {
 		msg.Info("No references set.\n")
 		return nil
@@ -29,7 +35,6 @@
 			for {
 				select {
 				case dep := <-ch:
-
 					var loc string
 					if dep.Repository != "" {
 						loc = dep.Repository
@@ -41,8 +46,8 @@
 						msg.Die(err.Error())
 					}
 					cache.Lock(key)
-					if err := VcsVersion(dep); err != nil {
-						msg.Err("Failed to set version on %s to %s: %s\n", dep.Name, dep.Reference, err)
+					if err := VcsVersion(dep, cwd); err != nil {
+						msg.Err("Failed to set version on %s to %s: %s\n", dep.Name, dep.GetConstraint(), err)
 
 						// Capture the error while making sure the concurrent
 						// operations don't step on each other.
diff --git a/repo/vcs.go b/repo/vcs.go
index 57ae33c..e0806d3 100644
--- a/repo/vcs.go
+++ b/repo/vcs.go
@@ -21,14 +21,14 @@
 )
 
 // VcsUpdate updates to a particular checkout based on the VCS setting.
-func VcsUpdate(dep *cfg.Dependency, force bool, updated *UpdateTracker) error {
+func VcsUpdate(dep *cfg.Dependency, dest, home string, cache, cacheGopath, useGopath, force, updateVendored bool, updated *UpdateTracker) error {
 
 	// If the dependency has already been pinned we can skip it. This is a
 	// faster path so we don't need to resolve it again.
-	if dep.Pin != "" {
-		msg.Debug("Dependency %s has already been pinned. Fetching updates skipped.", dep.Name)
-		return nil
-	}
+	//if dep.Pin != "" {
+	//msg.Debug("Dependency %s has already been pinned. Fetching updates skipped.", dep.Name)
+	//return nil
+	//}
 
 	if updated.Check(dep.Name) {
 		msg.Debug("%s was already updated, skipping.", dep.Name)
@@ -36,28 +36,21 @@
 	}
 	updated.Add(dep.Name)
 
+	msg.Info("--> Fetching updates for %s.", dep.Name)
+
 	if filterArchOs(dep) {
 		msg.Info("%s is not used for %s/%s.\n", dep.Name, runtime.GOOS, runtime.GOARCH)
 		return nil
 	}
 
-	key, err := cp.Key(dep.Remote())
-	if err != nil {
-		msg.Die("Cache key generation error: %s", err)
-	}
-	location := cp.Location()
-	dest := filepath.Join(location, "src", key)
-
 	// If destination doesn't exist we need to perform an initial checkout.
 	if _, err := os.Stat(dest); os.IsNotExist(err) {
-		msg.Info("--> Fetching %s.", dep.Name)
-		if err = VcsGet(dep); err != nil {
+		if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
 			msg.Warn("Unable to checkout %s\n", dep.Name)
 			return err
 		}
 	} else {
 		// At this point we have a directory for the package.
-		msg.Info("--> Fetching updates for %s.", dep.Name)
 
 		// When the directory is not empty and has no VCS directory it's
 		// a vendored files situation.
@@ -66,18 +59,40 @@
 			return err
 		}
 		_, err = v.DetectVcsFromFS(dest)
-		if empty == true && err == v.ErrCannotDetectVCS {
-			msg.Warn("Cached version of %s is an empty directory. Fetching a new copy of the dependency.", dep.Name)
+		if updateVendored == false && empty == false && err == v.ErrCannotDetectVCS {
+			msg.Warn("%s appears to be a vendored package. Unable to update. Consider the '--update-vendored' flag.\n", dep.Name)
+		} else if updateVendored == false && empty == true && err == v.ErrCannotDetectVCS {
+			msg.Warn("%s is an empty directory. Fetching a new copy of the dependency.", dep.Name)
 			msg.Debug("Removing empty directory %s", dest)
 			err := os.RemoveAll(dest)
 			if err != nil {
 				return err
 			}
-			if err = VcsGet(dep); err != nil {
+			if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
 				msg.Warn("Unable to checkout %s\n", dep.Name)
 				return err
 			}
 		} else {
+
+			if updateVendored == true && empty == false && err == v.ErrCannotDetectVCS {
+				// A vendored package, no repo, and updating the vendored packages
+				// has been opted into.
+				msg.Info("%s is a vendored package. Updating.", dep.Name)
+				err = os.RemoveAll(dest)
+				if err != nil {
+					msg.Err("Unable to update vendored dependency %s.\n", dep.Name)
+					return err
+				}
+				//dep.UpdateAsVendored = true
+
+				if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
+					msg.Warn("Unable to checkout %s\n", dep.Name)
+					return err
+				}
+
+				return nil
+			}
+
 			repo, err := dep.GetRepo(dest)
 
 			// Tried to checkout a repo to a path that does not work. Either the
@@ -86,14 +101,19 @@
 			// Warning, any changes in the old location will be deleted.
 			// TODO: Put dirty checking in on the existing local checkout.
 			if (err == v.ErrWrongVCS || err == v.ErrWrongRemote) && force == true {
-				newRemote := dep.Remote()
+				var newRemote string
+				if len(dep.Repository) > 0 {
+					newRemote = dep.Repository
+				} else {
+					newRemote = "https://" + dep.Name
+				}
 
 				msg.Warn("Replacing %s with contents from %s\n", dep.Name, newRemote)
 				rerr := os.RemoveAll(dest)
 				if rerr != nil {
 					return rerr
 				}
-				if err = VcsGet(dep); err != nil {
+				if err = VcsGet(dep, dest, home, cache, cacheGopath, useGopath); err != nil {
 					msg.Warn("Unable to checkout %s\n", dep.Name)
 					return err
 				}
@@ -108,29 +128,21 @@
 				return fmt.Errorf("%s contains uncommitted changes. Skipping update", dep.Name)
 			}
 
-			ver := dep.Reference
-			if ver == "" {
-				ver = defaultBranch(repo)
-			}
 			// Check if the current version is a tag or commit id. If it is
 			// and that version is already checked out we can skip updating
 			// which is faster than going out to the Internet to perform
 			// an update.
-			if ver != "" {
+			if !dep.IsUnconstrained() {
 				version, err := repo.Version()
 				if err != nil {
 					return err
 				}
-				ib, err := isBranch(ver, repo)
-				if err != nil {
-					return err
-				}
 
 				// If the current version equals the ref and it's not a
 				// branch it's a tag or commit id so we can skip
 				// performing an update.
-				if version == ver && !ib {
-					msg.Debug("%s is already set to version %s. Skipping update.", dep.Name, dep.Reference)
+				if version == dep.Version {
+					msg.Debug("%s is already set to version %s. Skipping update.", dep.Name, dep.GetConstraint())
 					return nil
 				}
 			}
@@ -146,33 +158,28 @@
 }
 
 // VcsVersion set the VCS version for a checkout.
-func VcsVersion(dep *cfg.Dependency) error {
+func VcsVersion(dep *cfg.Dependency, vend string) error {
 
 	// If the dependency has already been pinned we can skip it. This is a
 	// faster path so we don't need to resolve it again.
-	if dep.Pin != "" {
-		msg.Debug("Dependency %s has already been pinned. Setting version skipped.", dep.Name)
-		return nil
-	}
+	//if dep.Pin != "" {
+	//msg.Debug("Dependency %s has already been pinned. Setting version skipped.", dep.Name)
+	//return nil
+	//}
 
-	key, err := cp.Key(dep.Remote())
-	if err != nil {
-		msg.Die("Cache key generation error: %s", err)
-	}
-	location := cp.Location()
-	cwd := filepath.Join(location, "src", key)
+	cwd := filepath.Join(vend, dep.Name)
 
 	// If there is no reference configured there is nothing to set.
-	if dep.Reference == "" {
+	if dep.IsUnconstrained() {
 		// Before exiting update the pinned version
-		repo, err := dep.GetRepo(cwd)
-		if err != nil {
-			return err
-		}
-		dep.Pin, err = repo.Version()
-		if err != nil {
-			return err
-		}
+		//_, err := dep.GetRepo(cwd)
+		//if err != nil {
+		//return err
+		//}
+		//dep.Pin, err = repo.Version()
+		//if err != nil {
+		//return err
+		//}
 		return nil
 	}
 
@@ -184,65 +191,64 @@
 	}
 	_, err = v.DetectVcsFromFS(cwd)
 	if empty == false && err == v.ErrCannotDetectVCS {
-		return fmt.Errorf("Cache directory missing VCS information for %s", dep.Name)
-	}
-
-	repo, err := dep.GetRepo(cwd)
-	if err != nil {
-		return err
-	}
-
-	ver := dep.Reference
-	// References in Git can begin with a ^ which is similar to semver.
-	// If there is a ^ prefix we assume it's a semver constraint rather than
-	// part of the git/VCS commit id.
-	if repo.IsReference(ver) && !strings.HasPrefix(ver, "^") {
-		msg.Info("--> Setting version for %s to %s.\n", dep.Name, ver)
+		msg.Warn("%s appears to be a vendored package. Unable to set new version. Consider the '--update-vendored' flag.\n", dep.Name)
 	} else {
-
-		// Create the constraint first to make sure it's valid before
-		// working on the repo.
-		constraint, err := semver.NewConstraint(ver)
-
-		// Make sure the constriant is valid. At this point it's not a valid
-		// reference so if it's not a valid constrint we can exit early.
-		if err != nil {
-			msg.Warn("The reference '%s' is not valid\n", ver)
-			return err
-		}
-
-		// Get the tags and branches (in that order)
-		refs, err := getAllVcsRefs(repo)
+		repo, err := dep.GetRepo(cwd)
 		if err != nil {
 			return err
 		}
 
-		// Convert and filter the list to semver.Version instances
-		semvers := getSemVers(refs)
+		ver := dep.GetConstraint().String()
+		// References in Git can begin with a ^ which is similar to semver.
+		// If there is a ^ prefix we assume it's a semver constraint rather than
+		// part of the git/VCS commit id.
+		if repo.IsReference(ver) && !strings.HasPrefix(ver, "^") {
+			msg.Info("--> Setting version for %s to %s.\n", dep.Name, ver)
+		} else {
+			// Create the constraint first to make sure it's valid before
+			// working on the repo.
+			constraint, err := semver.NewConstraint(ver)
 
-		// Sort semver list
-		sort.Sort(sort.Reverse(semver.Collection(semvers)))
-		found := false
-		for _, v := range semvers {
-			if constraint.Check(v) {
-				found = true
-				// If the constrint passes get the original reference
-				ver = v.Original()
-				break
+			// Make sure the constraint is valid. At this point it's not a valid
+			// reference so if it's not a valid constraint we can exit early.
+			if err != nil {
+				msg.Warn("The reference '%s' is not valid\n", ver)
+				return err
+			}
+
+			// Get the tags and branches (in that order)
+			refs, err := getAllVcsRefs(repo)
+			if err != nil {
+				return err
+			}
+
+			// Convert and filter the list to semver.Version instances
+			semvers := getSemVers(refs)
+
+			// Sort semver list
+			sort.Sort(sort.Reverse(semver.Collection(semvers)))
+			found := false
+			for _, v := range semvers {
+				if constraint.Matches(v) == nil {
+					found = true
+					// If the constraint passes get the original reference
+					ver = v.Original()
+					break
+				}
+			}
+			if found {
+				msg.Info("--> Detected semantic version. Setting version for %s to %s.", dep.Name, ver)
+			} else {
+				msg.Warn("--> Unable to find semantic version for constraint %s %s", dep.Name, ver)
 			}
 		}
-		if found {
-			msg.Info("--> Detected semantic version. Setting version for %s to %s.", dep.Name, ver)
-		} else {
-			msg.Warn("--> Unable to find semantic version for constraint %s %s", dep.Name, ver)
+		if err := repo.UpdateVersion(ver); err != nil {
+			return err
 		}
-	}
-	if err := repo.UpdateVersion(ver); err != nil {
-		return err
-	}
-	dep.Pin, err = repo.Version()
-	if err != nil {
-		return err
+		//dep.Pin, err = repo.Version()
+		//if err != nil {
+		//return err
+		//}
 	}
 
 	return nil
@@ -250,47 +256,212 @@
 
 // VcsGet figures out how to fetch a dependency, and then gets it.
 //
-// VcsGet installs into the cache.
-func VcsGet(dep *cfg.Dependency) error {
+// VcsGet installs into the dest.
+func VcsGet(dep *cfg.Dependency, dest, home string, cache, cacheGopath, useGopath bool) error {
+	// When not skipping the $GOPATH look in it for a copy of the package
+	if useGopath {
+		// Check if the $GOPATH has a viable version to use and if so copy to vendor
+		gps := gpath.Gopaths()
+		for _, p := range gps {
+			d := filepath.Join(p, "src", dep.Name)
+			if _, err := os.Stat(d); err == nil {
+				empty, err := gpath.IsDirectoryEmpty(d)
+				if empty || err != nil {
+					continue
+				}
 
-	key, err := cp.Key(dep.Remote())
-	if err != nil {
-		msg.Die("Cache key generation error: %s", err)
+				repo, err := dep.GetRepo(d)
+				if err != nil {
+					continue
+				}
+
+				// Dirty repos have uncommitted changes.
+				if repo.IsDirty() {
+					continue
+				}
+
+				// Having found a repo we copy it to vendor and update it.
+				msg.Info("Copying package %s from the GOPATH.", dep.Name)
+				msg.Debug("Found %s in GOPATH at %s. Copying to %s", dep.Name, d, dest)
+				err = gpath.CopyDir(d, dest)
+				if err != nil {
+					return err
+				}
+
+				// Update the repo in the vendor directory
+				msg.Debug("Updating %s, now in the vendor path at %s", dep.Name, dest)
+				repo, err = dep.GetRepo(dest)
+				if err != nil {
+					return err
+				}
+				err = repo.Update()
+				if err != nil {
+					return err
+				}
+
+				// If there is no reference set on the dep we try to checkout
+				// the default branch.
+				if dep.IsUnconstrained() {
+					db := defaultBranch(repo, home)
+					if db != "" {
+						err = repo.UpdateVersion(db)
+						if err != nil && msg.Default.IsDebugging {
+							msg.Debug("Attempting to set the version on %s to %s failed. Error %s", dep.Name, db, err)
+						}
+					}
+				}
+				return nil
+			}
+		}
 	}
-	location := cp.Location()
-	d := filepath.Join(location, "src", key)
 
-	repo, err := dep.GetRepo(d)
+	// When opting in to cache in the GOPATH attempt to do put a copy there.
+	if cacheGopath {
+
+		// Since we didn't find an existing copy in the GOPATHs try to clone there.
+		gp := gpath.Gopath()
+		if gp != "" {
+			d := filepath.Join(gp, "src", dep.Name)
+			if _, err := os.Stat(d); os.IsNotExist(err) {
+				// Empty directory so we checkout out the code here.
+				msg.Debug("Retrieving %s to %s before copying to vendor", dep.Name, d)
+				repo, err := dep.GetRepo(d)
+				if err != nil {
+					return err
+				}
+				repo.Get()
+
+				branch := findCurrentBranch(repo)
+				if branch != "" {
+					// we know the default branch so we can store it in the cache
+					var loc string
+					if dep.Repository != "" {
+						loc = dep.Repository
+					} else {
+						loc = "https://" + dep.Name
+					}
+					key, err := cp.Key(loc)
+					if err == nil {
+						msg.Debug("Saving default branch for %s", repo.Remote())
+						c := cp.RepoInfo{DefaultBranch: branch}
+						err = cp.SaveRepoData(key, c)
+						if msg.Default.IsDebugging && err == cp.ErrCacheDisabled {
+							msg.Debug("Unable to cache default branch because caching is disabled")
+						}
+					}
+				}
+
+				msg.Debug("Copying %s from GOPATH at %s to %s", dep.Name, d, dest)
+				err = gpath.CopyDir(d, dest)
+				if err != nil {
+					return err
+				}
+
+				return nil
+			}
+		}
+	}
+
+	// If opting in to caching attempt to put it in the cache folder
+	if cache {
+		// Check if the cache has a viable version and try to use that.
+		var loc string
+		if dep.Repository != "" {
+			loc = dep.Repository
+		} else {
+			loc = "https://" + dep.Name
+		}
+		key, err := cp.Key(loc)
+		if err == nil {
+			location := cp.Location()
+			d := filepath.Join(location, "src", key)
+
+			repo, err := dep.GetRepo(d)
+			if err != nil {
+				return err
+			}
+			// If the directory does not exist this is a first cache.
+			if _, err = os.Stat(d); os.IsNotExist(err) {
+				msg.Debug("Adding %s to the cache for the first time", dep.Name)
+				err = repo.Get()
+				if err != nil {
+					return err
+				}
+				branch := findCurrentBranch(repo)
+				if branch != "" {
+					// we know the default branch so we can store it in the cache
+					var loc string
+					if dep.Repository != "" {
+						loc = dep.Repository
+					} else {
+						loc = "https://" + dep.Name
+					}
+					key, err := cp.Key(loc)
+					if err == nil {
+						msg.Debug("Saving default branch for %s", repo.Remote())
+						c := cp.RepoInfo{DefaultBranch: branch}
+						err = cp.SaveRepoData(key, c)
+						if err == cp.ErrCacheDisabled {
+							msg.Debug("Unable to cache default branch because caching is disabled")
+						} else if err != nil {
+							msg.Debug("Error saving %s to cache. Error: %s", repo.Remote(), err)
+						}
+					}
+				}
+
+			} else {
+				msg.Debug("Updating %s in the cache", dep.Name)
+				err = repo.Update()
+				if err != nil {
+					return err
+				}
+			}
+
+			msg.Debug("Copying %s from the cache to %s", dep.Name, dest)
+			err = gpath.CopyDir(d, dest)
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		msg.Warn("Cache key generation error: %s", err)
+	}
+
+	// If unable to cache pull directly into the vendor/ directory.
+	repo, err := dep.GetRepo(dest)
 	if err != nil {
 		return err
 	}
-	// If the directory does not exist this is a first cache.
-	if _, err = os.Stat(d); os.IsNotExist(err) {
-		msg.Debug("Adding %s to the cache for the first time", dep.Name)
-		err = repo.Get()
-		if err != nil {
-			return err
-		}
-		branch := findCurrentBranch(repo)
-		if branch != "" {
-			msg.Debug("Saving default branch for %s", repo.Remote())
-			c := cp.RepoInfo{DefaultBranch: branch}
-			err = cp.SaveRepoData(key, c)
-			if err == cp.ErrCacheDisabled {
-				msg.Debug("Unable to cache default branch because caching is disabled")
-			} else if err != nil {
-				msg.Debug("Error saving %s to cache. Error: %s", repo.Remote(), err)
+
+	gerr := repo.Get()
+
+	// Attempt to cache the default branch
+	if cache {
+		if branch := findCurrentBranch(repo); branch != "" {
+			// we know the default branch so we can store it in the cache
+			var loc string
+			if dep.Repository != "" {
+				loc = dep.Repository
+			} else {
+				loc = "https://" + dep.Name
 			}
-		}
-	} else {
-		msg.Debug("Updating %s in the cache", dep.Name)
-		err = repo.Update()
-		if err != nil {
-			return err
+			key, err := cp.Key(loc)
+			if err == nil {
+				msg.Debug("Saving default branch for %s", repo.Remote())
+				c := cp.RepoInfo{DefaultBranch: branch}
+				err = cp.SaveRepoData(key, c)
+				if err == cp.ErrCacheDisabled {
+					msg.Debug("Unable to cache default branch because caching is disabled")
+				} else if err != nil {
+					msg.Debug("Error saving %s to cache - Error: %s", repo.Remote(), err)
+				}
+			}
 		}
 	}
 
-	return nil
+	return gerr
 }
 
 // filterArchOs indicates a dependency should be filtered out because it is
@@ -298,32 +469,6 @@
 //
 // FIXME: Should this be moved to the dependency package?
 func filterArchOs(dep *cfg.Dependency) bool {
-	found := false
-	if len(dep.Arch) > 0 {
-		for _, a := range dep.Arch {
-			if a == runtime.GOARCH {
-				found = true
-			}
-		}
-		// If it's not found, it should be filtered out.
-		if !found {
-			return true
-		}
-	}
-
-	found = false
-	if len(dep.Os) > 0 {
-		for _, o := range dep.Os {
-			if o == runtime.GOOS {
-				found = true
-			}
-		}
-		if !found {
-			return true
-		}
-
-	}
-
 	return false
 }
 
@@ -344,7 +489,7 @@
 // defaultBranch tries to ascertain the default branch for the given repo.
 // Some repos will have multiple branches in them (e.g. Git) while others
 // (e.g. Svn) will not.
-func defaultBranch(repo v.Repo) string {
+func defaultBranch(repo v.Repo, home string) string {
 
 	// Svn and Bzr use different locations (paths or entire locations)
 	// for branches so we won't have a default branch.
diff --git a/repo/vendored_cleanup.go b/repo/vendored_cleanup.go
new file mode 100644
index 0000000..1db6de9
--- /dev/null
+++ b/repo/vendored_cleanup.go
@@ -0,0 +1,42 @@
+package repo
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/Masterminds/glide/cfg"
+	"github.com/Masterminds/glide/msg"
+	gpath "github.com/Masterminds/glide/path"
+)
+
+// VendoredCleanup cleans up vendored codebases after an update.
+//
+// This should _only_ be run for installations that do not want VCS repos inside
+// of the vendor/ directory.
+func VendoredCleanup(conf *cfg.Config) error {
+	vend, err := gpath.Vendor()
+	if err != nil {
+		return err
+	}
+
+	for _, dep := range conf.Imports {
+		//if dep.UpdateAsVendored == true {
+		msg.Info("Cleaning up vendored package %s\n", dep.Name)
+
+		// Remove the VCS directory
+		cwd := filepath.Join(vend, dep.Name)
+		repo, err := dep.GetRepo(cwd)
+		if err != nil {
+			msg.Err("Error cleaning up %s:%s", dep.Name, err)
+			continue
+		}
+		t := repo.Vcs()
+		err = os.RemoveAll(cwd + string(os.PathSeparator) + "." + string(t))
+		if err != nil {
+			msg.Err("Error cleaning up VCS dir for %s:%s", dep.Name, err)
+		}
+		//}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md
index c3808ea..2555067 100644
--- a/vendor/github.com/Masterminds/semver/CHANGELOG.md
+++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md
@@ -1,13 +1,9 @@
-# Release 1.1.1 (2016-06-30)
+# Release 1.x.x (xxxx-xx-xx)
 
-## Changed
 - Issue #9: Speed up version comparison performance (thanks @sdboyer)
 - Issue #8: Added benchmarks (thanks @sdboyer)
-- Updated Go Report Card URL to new location
-- Updated Readme to add code snippet formatting (thanks @mh-cbon)
-- Updating tagging to v[SemVer] structure for compatibility with other tools.
 
-# Release 1.1.0 (2016-03-11)
+# Release 1.1.0 (2015-03-11)
 
 - Issue #2: Implemented validation to provide reasons a versions failed a
   constraint.
diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md
index 1edec7a..aa133ea 100644
--- a/vendor/github.com/Masterminds/semver/README.md
+++ b/vendor/github.com/Masterminds/semver/README.md
@@ -7,15 +7,13 @@
 * Check if a semantic version fits within a set of constraints
 * Optionally work with a `v` prefix
 
-[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.png)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.png)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](http://goreportcard.com/badge/Masterminds/semver)](http://goreportcard.com/report/Masterminds/semver)
 
 ## Parsing Semantic Versions
 
 To parse a semantic version use the `NewVersion` function. For example,
 
-```go
     v, err := semver.NewVersion("1.2.3-beta.1+build345")
-```
 
 If there is an error the version wasn't parseable. The version object has methods
 to get the parts of the version, compare it to other versions, convert the
@@ -27,7 +25,6 @@
 A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
 package from the standard library. For example,
 
-```go
     raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
     vs := make([]*semver.Version, len(raw))
 	for i, r := range raw {
@@ -40,14 +37,12 @@
 	}
 
 	sort.Sort(semver.Collection(vs))
-```
 
 ## Checking Version Constraints
 
 Checking a version against version constraints is one of the most featureful
 parts of the package.
 
-```go
     c, err := semver.NewConstraint(">= 1.2.3")
     if err != nil {
         // Handle constraint not being parseable.
@@ -59,7 +54,6 @@
     }
     // Check if the version meets the constraints. The a variable will be true.
     a := c.Check(v)
-```
 
 ## Basic Comparisons
 
@@ -125,7 +119,6 @@
 against a constraint. When validation fails a slice of errors containing why a
 version didn't meet the constraint is returned. For example,
 
-```go
     c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
     if err != nil {
         // Handle constraint not being parseable.
@@ -146,7 +139,6 @@
         // "1.3 is greater than 1.2.3"
         // "1.3 is less than 1.4"
     }
-```
 
 # Contribute
 
diff --git a/vendor/github.com/Masterminds/semver/benchmark_test.go b/vendor/github.com/Masterminds/semver/benchmark_test.go
index 58a5c28..5a76f6a 100644
--- a/vendor/github.com/Masterminds/semver/benchmark_test.go
+++ b/vendor/github.com/Masterminds/semver/benchmark_test.go
@@ -1,16 +1,53 @@
-package semver_test
+package semver
 
-import (
-	"testing"
+import "testing"
 
-	"github.com/Masterminds/semver"
+func init() {
+	// disable constraint and version creation caching
+	CacheConstraints = false
+	CacheVersions = false
+}
+
+var (
+	rc1 = rangeConstraint{
+		min:        newV(1, 5, 0),
+		max:        newV(2, 0, 0),
+		includeMax: true,
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	rc3 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc4 = rangeConstraint{
+		min: newV(1, 7, 0),
+		max: newV(4, 0, 0),
+	}
+	rc5 = rangeConstraint{
+		min: newV(2, 7, 0),
+		max: newV(3, 0, 0),
+	}
+	rc6 = rangeConstraint{
+		min: newV(3, 0, 1),
+		max: newV(3, 0, 4),
+	}
+	rc7 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(1, 2, 0),
+	}
+	// Two fully non-overlapping unions
+	u1 = rc1.Union(rc7)
+	u2 = rc5.Union(rc6)
 )
 
 /* Constraint creation benchmarks */
 
 func benchNewConstraint(c string, b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		semver.NewConstraint(c)
+		NewConstraint(c)
 	}
 }
 
@@ -38,52 +75,17 @@
 	benchNewConstraint("~2.0.0 || =3.1.0", b)
 }
 
-/* Check benchmarks */
-
-func benchCheckVersion(c, v string, b *testing.B) {
-	version, _ := semver.NewVersion(v)
-	constraint, _ := semver.NewConstraint(c)
-
-	for i := 0; i < b.N; i++ {
-		constraint.Check(version)
-	}
-}
-
-func BenchmarkCheckVersionUnary(b *testing.B) {
-	benchCheckVersion("=2.0", "2.0.0", b)
-}
-
-func BenchmarkCheckVersionTilde(b *testing.B) {
-	benchCheckVersion("~2.0.0", "2.0.5", b)
-}
-
-func BenchmarkCheckVersionCaret(b *testing.B) {
-	benchCheckVersion("^2.0.0", "2.1.0", b)
-}
-
-func BenchmarkCheckVersionWildcard(b *testing.B) {
-	benchCheckVersion("1.x", "1.4.0", b)
-}
-
-func BenchmarkCheckVersionRange(b *testing.B) {
-	benchCheckVersion(">=2.1.x, <3.1.0", "2.4.5", b)
-}
-
-func BenchmarkCheckVersionUnion(b *testing.B) {
-	benchCheckVersion("~2.0.0 || =3.1.0", "3.1.0", b)
-}
+/* Validate benchmarks, including fails */
 
 func benchValidateVersion(c, v string, b *testing.B) {
-	version, _ := semver.NewVersion(v)
-	constraint, _ := semver.NewConstraint(c)
+	version, _ := NewVersion(v)
+	constraint, _ := NewConstraint(c)
 
 	for i := 0; i < b.N; i++ {
-		constraint.Validate(version)
+		constraint.Matches(version)
 	}
 }
 
-/* Validate benchmarks, including fails */
-
 func BenchmarkValidateVersionUnary(b *testing.B) {
 	benchValidateVersion("=2.0", "2.0.0", b)
 }
@@ -136,7 +138,7 @@
 
 func benchNewVersion(v string, b *testing.B) {
 	for i := 0; i < b.N; i++ {
-		semver.NewVersion(v)
+		NewVersion(v)
 	}
 }
 
@@ -155,3 +157,103 @@
 func BenchmarkNewVersionMetaDash(b *testing.B) {
 	benchNewVersion("1.0.0+metadata-dash", b)
 }
+
+/* Union benchmarks */
+
+func BenchmarkAdjacentRangeUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(rc1, rc2)
+	}
+}
+
+func BenchmarkAdjacentRangeUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc1.Union(rc2)
+	}
+}
+
+func BenchmarkDisjointRangeUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(rc2, rc3)
+	}
+}
+
+func BenchmarkDisjointRangeUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc2.Union(rc3)
+	}
+}
+
+func BenchmarkOverlappingRangeUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(rc1, rc4)
+	}
+}
+
+func BenchmarkOverlappingRangeUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc1.Union(rc4)
+	}
+}
+
+func BenchmarkUnionUnion(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Union(u1, u2)
+	}
+}
+
+func BenchmarkUnionUnionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		u1.Union(u2)
+	}
+}
+
+/* Intersection benchmarks */
+
+func BenchmarkSubsetRangeIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(rc2, rc4)
+	}
+}
+
+func BenchmarkSubsetRangeIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc2.Intersect(rc4)
+	}
+}
+
+func BenchmarkDisjointRangeIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(rc2, rc3)
+	}
+}
+
+func BenchmarkDisjointRangeIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc2.Intersect(rc3)
+	}
+}
+
+func BenchmarkOverlappingRangeIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(rc1, rc4)
+	}
+}
+
+func BenchmarkOverlappingRangeIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		rc1.Intersect(rc4)
+	}
+}
+
+func BenchmarkUnionIntersection(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Intersection(u1, u2)
+	}
+}
+
+func BenchmarkUnionIntersectionMethod(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		u1.Intersect(u2)
+	}
+}
diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go
index b63f5f6..bf2f500 100644
--- a/vendor/github.com/Masterminds/semver/constraints.go
+++ b/vendor/github.com/Masterminds/semver/constraints.go
@@ -1,126 +1,39 @@
 package semver
 
 import (
-	"errors"
 	"fmt"
 	"regexp"
+	"sort"
 	"strings"
+	"sync"
 )
 
-// Constraints is one or more constraint that a semantic version can be
-// checked against.
-type Constraints struct {
-	constraints [][]*constraint
-}
-
-// NewConstraint returns a Constraints instance that a Version instance can
-// be checked against. If there is a parse error it will be returned.
-func NewConstraint(c string) (*Constraints, error) {
-
-	// Rewrite - ranges into a comparison operation.
-	c = rewriteRange(c)
-
-	ors := strings.Split(c, "||")
-	or := make([][]*constraint, len(ors))
-	for k, v := range ors {
-		cs := strings.Split(v, ",")
-		result := make([]*constraint, len(cs))
-		for i, s := range cs {
-			pc, err := parseConstraint(s)
-			if err != nil {
-				return nil, err
-			}
-
-			result[i] = pc
-		}
-		or[k] = result
-	}
-
-	o := &Constraints{constraints: or}
-	return o, nil
-}
-
-// Check tests if a version satisfies the constraints.
-func (cs Constraints) Check(v *Version) bool {
-	// loop over the ORs and check the inner ANDs
-	for _, o := range cs.constraints {
-		joy := true
-		for _, c := range o {
-			if !c.check(v) {
-				joy = false
-				break
-			}
-		}
-
-		if joy {
-			return true
-		}
-	}
-
-	return false
-}
-
-// Validate checks if a version satisfies a constraint. If not a slice of
-// reasons for the failure are returned in addition to a bool.
-func (cs Constraints) Validate(v *Version) (bool, []error) {
-	// loop over the ORs and check the inner ANDs
-	var e []error
-	for _, o := range cs.constraints {
-		joy := true
-		for _, c := range o {
-			if !c.check(v) {
-				em := fmt.Errorf(c.msg, v, c.orig)
-				e = append(e, em)
-				joy = false
-			}
-		}
-
-		if joy {
-			return true, []error{}
-		}
-	}
-
-	return false, e
-}
-
-var constraintOps map[string]cfunc
-var constraintMsg map[string]string
 var constraintRegex *regexp.Regexp
+var constraintRangeRegex *regexp.Regexp
+
+const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
+	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
 
 func init() {
-	constraintOps = map[string]cfunc{
-		"":   constraintTildeOrEqual,
-		"=":  constraintTildeOrEqual,
-		"!=": constraintNotEqual,
-		">":  constraintGreaterThan,
-		"<":  constraintLessThan,
-		">=": constraintGreaterThanEqual,
-		"=>": constraintGreaterThanEqual,
-		"<=": constraintLessThanEqual,
-		"=<": constraintLessThanEqual,
-		"~":  constraintTilde,
-		"~>": constraintTilde,
-		"^":  constraintCaret,
-	}
-
-	constraintMsg = map[string]string{
-		"":   "%s is not equal to %s",
-		"=":  "%s is not equal to %s",
-		"!=": "%s is equal to %s",
-		">":  "%s is less than or equal to %s",
-		"<":  "%s is greater than or equal to %s",
-		">=": "%s is less than %s",
-		"=>": "%s is less than %s",
-		"<=": "%s is greater than %s",
-		"=<": "%s is greater than %s",
-		"~":  "%s does not have same major and minor version as %s",
-		"~>": "%s does not have same major and minor version as %s",
-		"^":  "%s does not have same major version as %s",
+	constraintOps := []string{
+		"",
+		"=",
+		"!=",
+		">",
+		"<",
+		">=",
+		"=>",
+		"<=",
+		"=<",
+		"~",
+		"~>",
+		"^",
 	}
 
 	ops := make([]string, 0, len(constraintOps))
-	for k := range constraintOps {
-		ops = append(ops, regexp.QuoteMeta(k))
+	for _, op := range constraintOps {
+		ops = append(ops, regexp.QuoteMeta(op))
 	}
 
 	constraintRegex = regexp.MustCompile(fmt.Sprintf(
@@ -133,210 +46,250 @@
 		cvRegex, cvRegex))
 }
 
-// An individual constraint
-type constraint struct {
-	// The callback function for the restraint. It performs the logic for
-	// the constraint.
-	function cfunc
+type Constraint interface {
+	// Constraints compose the fmt.Stringer interface. Printing a constraint
+	// will yield a string that, if passed to NewConstraint(), will produce the
+	// original constraint. (Bidirectional serialization)
+	fmt.Stringer
 
-	msg string
+	// Matches checks that a version satisfies the constraint. If it does not,
+	// an error is returned indicating the problem; if it does, the error is nil.
+	Matches(v *Version) error
 
-	// The version used in the constraint check. For example, if a constraint
-	// is '<= 2.0.0' the con a version instance representing 2.0.0.
-	con *Version
+	// Intersect computes the intersection between the receiving Constraint and
+	// passed Constraint, and returns a new Constraint representing the result.
+	Intersect(Constraint) Constraint
 
-	// The original parsed version (e.g., 4.x from != 4.x)
-	orig string
+	// Union computes the union between the receiving Constraint and the passed
+	// Constraint, and returns a new Constraint representing the result.
+	Union(Constraint) Constraint
 
-	// When an x is used as part of the version (e.g., 1.x)
-	minorDirty bool
-	dirty      bool
+	// MatchesAny returns a bool indicating whether there exists any version that
+	// satisfies both the receiver constraint, and the passed Constraint.
+	//
+	// In other words, this reports whether an intersection would be non-empty.
+	MatchesAny(Constraint) bool
+
+	// Restrict implementation of this interface to this package. We need the
+	// flexibility of an interface, but we cover all possibilities here; closing
+	// off the interface to external implementation lets us safely do tricks
+	// with types for magic types (none and any)
+	_private()
 }
 
-// Check if a version meets the constraint
-func (c *constraint) check(v *Version) bool {
-	return c.function(v, c)
+// realConstraint is used internally to differentiate between any, none, and
+// unionConstraints, vs. Version and rangeConstraints.
+type realConstraint interface {
+	Constraint
+	_real()
 }
 
-type cfunc func(v *Version, c *constraint) bool
+// Controls whether or not parsed constraints are cached
+var CacheConstraints = true
+var constraintCache = make(map[string]ccache)
+var constraintCacheLock sync.RWMutex
 
-func parseConstraint(c string) (*constraint, error) {
-	m := constraintRegex.FindStringSubmatch(c)
-	if m == nil {
-		return nil, fmt.Errorf("improper constraint: %s", c)
-	}
-
-	ver := m[2]
-	orig := ver
-	minorDirty := false
-	dirty := false
-	if isX(m[3]) {
-		ver = "0.0.0"
-		dirty = true
-	} else if isX(strings.TrimPrefix(m[4], ".")) {
-		minorDirty = true
-		dirty = true
-		ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
-	} else if isX(strings.TrimPrefix(m[5], ".")) {
-		dirty = true
-		ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
-	}
-
-	con, err := NewVersion(ver)
-	if err != nil {
-
-		// The constraintRegex should catch any regex parsing errors. So,
-		// we should never get here.
-		return nil, errors.New("constraint Parser Error")
-	}
-
-	cs := &constraint{
-		function:   constraintOps[m[1]],
-		msg:        constraintMsg[m[1]],
-		con:        con,
-		orig:       orig,
-		minorDirty: minorDirty,
-		dirty:      dirty,
-	}
-	return cs, nil
+type ccache struct {
+	c   Constraint
+	err error
 }
 
-// Constraint functions
-func constraintNotEqual(v *Version, c *constraint) bool {
-	if c.dirty {
-		if c.con.Major() != v.Major() {
-			return true
+// NewConstraint takes a string representing a set of semver constraints, and
+// returns a corresponding Constraint object. Constraints are suitable
+// for checking Versions for admissibility, or combining with other Constraint
+// objects.
+//
+// If an invalid constraint string is passed, more information is provided in
+// the returned error string.
+func NewConstraint(in string) (Constraint, error) {
+	if CacheConstraints {
+		constraintCacheLock.RLock()
+		if final, exists := constraintCache[in]; exists {
+			constraintCacheLock.RUnlock()
+			return final.c, final.err
 		}
-		if c.con.Minor() != v.Minor() && !c.minorDirty {
-			return true
-		} else if c.minorDirty {
-			return false
+		constraintCacheLock.RUnlock()
+	}
+
+	// Rewrite - ranges into a comparison operation.
+	c := rewriteRange(in)
+
+	ors := strings.Split(c, "||")
+	or := make([]Constraint, len(ors))
+	for k, v := range ors {
+		cs := strings.Split(v, ",")
+		result := make([]Constraint, len(cs))
+		for i, s := range cs {
+			pc, err := parseConstraint(s)
+			if err != nil {
+				if CacheConstraints {
+					constraintCacheLock.Lock()
+					constraintCache[in] = ccache{err: err}
+					constraintCacheLock.Unlock()
+				}
+				return nil, err
+			}
+
+			result[i] = pc
+		}
+		or[k] = Intersection(result...)
+	}
+
+	final := Union(or...)
+
+	if CacheConstraints {
+		constraintCacheLock.Lock()
+		constraintCache[in] = ccache{c: final}
+		constraintCacheLock.Unlock()
+	}
+
+	return final, nil
+}
+
+// Intersection computes the intersection between N Constraints, returning as
+// compact a representation of the intersection as possible.
+//
+// No error is indicated if all the sets are collectively disjoint; you must inspect the
+// return value to see if the result is the empty set (by calling IsNone() on
+// it).
+func Intersection(cg ...Constraint) Constraint {
+	// If there's zero or one constraints in the group, we can quit fast
+	switch len(cg) {
+	case 0:
+		// Zero members, only sane thing to do is return none
+		return None()
+	case 1:
+		// Just one member means that's our final constraint
+		return cg[0]
+	}
+
+	car, cdr := cg[0], cg[1:]
+	for _, c := range cdr {
+		if IsNone(car) {
+			return None()
+		}
+		car = car.Intersect(c)
+	}
+
+	return car
+}
+
+// Union takes a variable number of constraints, and returns the most compact
+// possible representation of those constraints.
+//
+// This effectively ORs together all the provided constraints. If any of the
+// included constraints are the set of all versions (any), that supersedes
+// everything else.
+func Union(cg ...Constraint) Constraint {
+	// If there's zero or one constraints in the group, we can quit fast
+	switch len(cg) {
+	case 0:
+		// Zero members, only sane thing to do is return none
+		return None()
+	case 1:
+		// One member, so the result will just be that
+		return cg[0]
+	}
+
+	// Preliminary pass to look for 'any' in the current set (and bail out early
+	// if found), but also construct a []realConstraint for everything else
+	var real constraintList
+
+	for _, c := range cg {
+		switch tc := c.(type) {
+		case any:
+			return c
+		case none:
+			continue
+		case *Version:
+			//if tc != nil {
+			//heap.Push(&real, tc)
+			//}
+			real = append(real, tc)
+		case rangeConstraint:
+			//heap.Push(&real, tc)
+			real = append(real, tc)
+		case unionConstraint:
+			real = append(real, tc...)
+			//for _, c2 := range tc {
+			//heap.Push(&real, c2)
+			//}
+		default:
+			panic("unknown constraint type")
+		}
+	}
+	// TODO wtf why isn't heap working...so, ugh, have to do this
+
+	// Sort both the versions and ranges into ascending order
+	sort.Sort(real)
+
+	// Iteratively merge the constraintList elements
+	var nuc unionConstraint
+	for _, c := range real {
+		if len(nuc) == 0 {
+			nuc = append(nuc, c)
+			continue
 		}
 
-		return false
+		last := nuc[len(nuc)-1]
+		switch lt := last.(type) {
+		case *Version:
+			switch ct := c.(type) {
+			case *Version:
+				// Two versions in a row; only append if they're not equal
+				if !lt.Equal(ct) {
+					nuc = append(nuc, ct)
+				}
+			case rangeConstraint:
+				// Last was version, current is range. constraintList sorts by
+				// min version, so it's guaranteed that the version will be less
+				// than the range's min, guaranteeing that these are disjoint.
+				//
+				// ...almost. If the min of the range is the same as the
+				// version, then a union should merge the two by making the
+				// range inclusive at the bottom.
+				if lt.Equal(ct.min) {
+					ct.includeMin = true
+					nuc[len(nuc)-1] = ct
+				} else {
+					nuc = append(nuc, c)
+				}
+			}
+		case rangeConstraint:
+			switch ct := c.(type) {
+			case *Version:
+				// Last was range, current is version. constraintList sort invariants guarantee
+				// that the version will be greater than the min, so we have to
+				// determine if the version is less than the max. If it is, we
+				// subsume it into the range with a Union call.
+				//
+				// Lazy version: just union them and let rangeConstraint figure
+				// it out, then switch on the result type.
+				c2 := lt.Union(ct)
+				if crc, ok := c2.(realConstraint); ok {
+					nuc[len(nuc)-1] = crc
+				} else {
+					// Otherwise, all it can be is a union constraint. First
+					// item in the union will be the same range, second will be the
+					// version, so append onto nuc from one back from the end
+					nuc = append(nuc[:len(nuc)-1], c2.(unionConstraint)...)
+				}
+			case rangeConstraint:
+				if lt.MatchesAny(ct) || areAdjacent(lt, ct) {
+					// If the previous range overlaps or is adjacent to the
+					// current range, we know they'll be able to merge together,
+					// so overwrite the last item in nuc with the result of that
+					// merge (which is what Union will produce)
+					nuc[len(nuc)-1] = lt.Union(ct).(realConstraint)
+				} else {
+					nuc = append(nuc, c)
+				}
+			}
+		}
 	}
 
-	return !v.Equal(c.con)
-}
-
-func constraintGreaterThan(v *Version, c *constraint) bool {
-	return v.Compare(c.con) == 1
-}
-
-func constraintLessThan(v *Version, c *constraint) bool {
-	if !c.dirty {
-		return v.Compare(c.con) < 0
+	if len(nuc) == 1 {
+		return nuc[0]
 	}
-
-	if v.Major() > c.con.Major() {
-		return false
-	} else if v.Minor() > c.con.Minor() && !c.minorDirty {
-		return false
-	}
-
-	return true
-}
-
-func constraintGreaterThanEqual(v *Version, c *constraint) bool {
-	return v.Compare(c.con) >= 0
-}
-
-func constraintLessThanEqual(v *Version, c *constraint) bool {
-	if !c.dirty {
-		return v.Compare(c.con) <= 0
-	}
-
-	if v.Major() > c.con.Major() {
-		return false
-	} else if v.Minor() > c.con.Minor() && !c.minorDirty {
-		return false
-	}
-
-	return true
-}
-
-// ~*, ~>* --> >= 0.0.0 (any)
-// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
-// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
-// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
-// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
-// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
-func constraintTilde(v *Version, c *constraint) bool {
-	if v.LessThan(c.con) {
-		return false
-	}
-
-	// ~0.0.0 is a special case where all constraints are accepted. It's
-	// equivalent to >= 0.0.0.
-	if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 {
-		return true
-	}
-
-	if v.Major() != c.con.Major() {
-		return false
-	}
-
-	if v.Minor() != c.con.Minor() && !c.minorDirty {
-		return false
-	}
-
-	return true
-}
-
-// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
-// it's a straight =
-func constraintTildeOrEqual(v *Version, c *constraint) bool {
-	if c.dirty {
-		c.msg = constraintMsg["~"]
-		return constraintTilde(v, c)
-	}
-
-	return v.Equal(c.con)
-}
-
-// ^* --> (any)
-// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0
-// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0
-// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0
-// ^1.2.3 --> >=1.2.3, <2.0.0
-// ^1.2.0 --> >=1.2.0, <2.0.0
-func constraintCaret(v *Version, c *constraint) bool {
-	if v.LessThan(c.con) {
-		return false
-	}
-
-	if v.Major() != c.con.Major() {
-		return false
-	}
-
-	return true
-}
-
-var constraintRangeRegex *regexp.Regexp
-
-const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
-	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
-	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
-
-func isX(x string) bool {
-	switch x {
-	case "x", "*", "X":
-		return true
-	default:
-		return false
-	}
-}
-
-func rewriteRange(i string) string {
-	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
-	if m == nil {
-		return i
-	}
-	o := i
-	for _, v := range m {
-		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
-		o = strings.Replace(o, v[0], t, 1)
-	}
-
-	return o
+	return nuc
 }
diff --git a/vendor/github.com/Masterminds/semver/constraints_test.go b/vendor/github.com/Masterminds/semver/constraints_test.go
index 6dad455..6b09d73 100644
--- a/vendor/github.com/Masterminds/semver/constraints_test.go
+++ b/vendor/github.com/Masterminds/semver/constraints_test.go
@@ -1,27 +1,52 @@
 package semver
 
-import (
-	"reflect"
-	"testing"
-)
+import "testing"
 
 func TestParseConstraint(t *testing.T) {
 	tests := []struct {
 		in  string
-		f   cfunc
-		v   string
+		c   Constraint
 		err bool
 	}{
-		{">= 1.2", constraintGreaterThanEqual, "1.2.0", false},
-		{"1.0", constraintTildeOrEqual, "1.0.0", false},
-		{"foo", nil, "", true},
-		{"<= 1.2", constraintLessThanEqual, "1.2.0", false},
-		{"=< 1.2", constraintLessThanEqual, "1.2.0", false},
-		{"=> 1.2", constraintGreaterThanEqual, "1.2.0", false},
-		{"v1.2", constraintTildeOrEqual, "1.2.0", false},
-		{"=1.5", constraintTildeOrEqual, "1.5.0", false},
-		{"> 1.3", constraintGreaterThan, "1.3.0", false},
-		{"< 1.4.1", constraintLessThan, "1.4.1", false},
+		{"*", Any(), false},
+		{">= 1.2", rangeConstraint{
+			min:        newV(1, 2, 0),
+			includeMin: true,
+		}, false},
+		{"1.0", newV(1, 0, 0), false},
+		{"foo", nil, true},
+		{"<= 1.2", rangeConstraint{
+			max:        newV(1, 2, 0),
+			includeMax: true,
+		}, false},
+		{"=< 1.2", rangeConstraint{
+			max:        newV(1, 2, 0),
+			includeMax: true,
+		}, false},
+		{"=> 1.2", rangeConstraint{
+			min:        newV(1, 2, 0),
+			includeMin: true,
+		}, false},
+		{"v1.2", newV(1, 2, 0), false},
+		{"=1.5", newV(1, 5, 0), false},
+		{"> 1.3", rangeConstraint{
+			min: newV(1, 3, 0),
+		}, false},
+		{"< 1.4.1", rangeConstraint{
+			max: newV(1, 4, 1),
+		}, false},
+		{"~1.1.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(1, 2, 0),
+			includeMin: true,
+			includeMax: false,
+		}, false},
+		{"^1.1.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+			includeMax: false,
+		}, false},
 	}
 
 	for _, tc := range tests {
@@ -29,7 +54,7 @@
 		if tc.err && err == nil {
 			t.Errorf("Expected error for %s didn't occur", tc.in)
 		} else if !tc.err && err != nil {
-			t.Errorf("Unexpected error for %s", tc.in)
+			t.Errorf("Unexpected error %q for %s", err, tc.in)
 		}
 
 		// If an error was expected continue the loop and don't try the other
@@ -38,15 +63,84 @@
 			continue
 		}
 
-		if tc.v != c.con.String() {
+		if !constraintEq(tc.c, c) {
 			t.Errorf("Incorrect version found on %s", tc.in)
 		}
+	}
+}
 
-		f1 := reflect.ValueOf(tc.f)
-		f2 := reflect.ValueOf(c.function)
-		if f1 != f2 {
-			t.Errorf("Wrong constraint found for %s", tc.in)
+func constraintEq(c1, c2 Constraint) bool {
+	switch tc1 := c1.(type) {
+	case any:
+		if _, ok := c2.(any); !ok {
+			return false
 		}
+		return true
+	case none:
+		if _, ok := c2.(none); !ok {
+			return false
+		}
+		return true
+	case *Version:
+		if tc2, ok := c2.(*Version); ok {
+			return tc1.Equal(tc2)
+		}
+		return false
+	case rangeConstraint:
+		if tc2, ok := c2.(rangeConstraint); ok {
+			if len(tc1.excl) != len(tc2.excl) {
+				return false
+			}
+
+			if tc1.min != nil {
+				if !(tc1.includeMin == tc2.includeMin && tc1.min.Equal(tc2.min)) {
+					return false
+				}
+			} else if tc2.min != nil {
+				return false
+			}
+
+			if tc1.max != nil {
+				if !(tc1.includeMax == tc2.includeMax && tc1.max.Equal(tc2.max)) {
+					return false
+				}
+			} else if tc2.max != nil {
+				return false
+			}
+
+			for k, e := range tc1.excl {
+				if !e.Equal(tc2.excl[k]) {
+					return false
+				}
+			}
+			return true
+		}
+		return false
+	case unionConstraint:
+		if tc2, ok := c2.(unionConstraint); ok {
+			if len(tc1) != len(tc2) {
+				return false
+			}
+
+			for k, c := range tc1 {
+				if !constraintEq(c, tc2[k]) {
+					return false
+				}
+			}
+			return true
+		}
+		return false
+	}
+
+	panic("unknown type")
+}
+
+// newV is a helper to create a new Version object.
+func newV(major, minor, patch int64) *Version {
+	return &Version{
+		major: major,
+		minor: minor,
+		patch: patch,
 	}
 }
 
@@ -72,9 +166,28 @@
 		{"<=1.1", "0.1.0", true},
 		{"<=1.1", "1.1.0", true},
 		{"<=1.1", "1.1.1", false},
+		{"<=1.1-alpha1", "1.1", false},
+		{"<=2.x", "3.0.0", false},
+		{"<=2.x", "2.9.9", true},
+		{"<2.x", "2.0.0", false},
+		{"<2.x", "1.9.9", true},
+		{">=2.x", "3.0.0", true},
+		{">=2.x", "2.9.9", true},
+		{">=2.x", "1.9.9", false},
+		{">2.x", "3.0.0", true},
+		{">2.x", "2.9.9", false},
+		{">2.x", "1.9.9", false},
+		// TODO these are all pending the changes in #10
+		//{"<=2.x-beta1", "3.0.0-alpha2", false},
+		//{">2.x-beta1", "3.0.0-alpha2", true},
+		//{"<2.0.0", "2.0.0-alpha1", false},
+		//{"<=2.0.0", "2.0.0-alpha1", true},
 	}
 
 	for _, tc := range tests {
+		if testing.Verbose() {
+			t.Logf("Testing if %q allows %q", tc.constraint, tc.version)
+		}
 		c, err := parseConstraint(tc.constraint)
 		if err != nil {
 			t.Errorf("err: %s", err)
@@ -87,9 +200,13 @@
 			continue
 		}
 
-		a := c.check(v)
+		a := c.Matches(v) == nil
 		if a != tc.check {
-			t.Errorf("Constraint '%s' failing", tc.constraint)
+			if tc.check {
+				t.Errorf("%q should have matched %q", tc.constraint, tc.version)
+			} else {
+				t.Errorf("%q should not have matched %q", tc.constraint, tc.version)
+			}
 		}
 	}
 }
@@ -97,22 +214,74 @@
 func TestNewConstraint(t *testing.T) {
 	tests := []struct {
 		input string
-		ors   int
-		count int
+		c     Constraint
 		err   bool
 	}{
-		{">= 1.1", 1, 1, false},
-		{"2.0", 1, 1, false},
-		{">= bar", 0, 0, true},
-		{">= 1.2.3, < 2.0", 1, 2, false},
-		{">= 1.2.3, < 2.0 || => 3.0, < 4", 2, 2, false},
-
-		// The 3-4 should be broken into 2 by the range rewriting
-		{"3-4 || => 3.0, < 4", 2, 2, false},
+		{">= 1.1", rangeConstraint{
+			min:        newV(1, 1, 0),
+			includeMin: true,
+		}, false},
+		{"2.0", newV(2, 0, 0), false},
+		{">= bar", nil, true},
+		{"^1.1.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+		}, false},
+		{">= 1.2.3, < 2.0 || => 3.0, < 4", unionConstraint{
+			rangeConstraint{
+				min:        newV(1, 2, 3),
+				max:        newV(2, 0, 0),
+				includeMin: true,
+			},
+			rangeConstraint{
+				min:        newV(3, 0, 0),
+				max:        newV(4, 0, 0),
+				includeMin: true,
+			},
+		}, false},
+		{"3-4 || => 1.0, < 2", Union(
+			rangeConstraint{
+				min:        newV(3, 0, 0),
+				max:        newV(4, 0, 0),
+				includeMin: true,
+				includeMax: true,
+			},
+			rangeConstraint{
+				min:        newV(1, 0, 0),
+				max:        newV(2, 0, 0),
+				includeMin: true,
+			},
+		), false},
+		// demonstrates union compression
+		{"3-4 || => 3.0, < 4", rangeConstraint{
+			min:        newV(3, 0, 0),
+			max:        newV(4, 0, 0),
+			includeMin: true,
+			includeMax: true,
+		}, false},
+		{">=1.1.0, <2.0.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+			includeMax: false,
+		}, false},
+		{"!=1.4.0", rangeConstraint{
+			excl: []*Version{
+				newV(1, 4, 0),
+			},
+		}, false},
+		{">=1.1.0, !=1.4.0", rangeConstraint{
+			min:        newV(1, 1, 0),
+			includeMin: true,
+			excl: []*Version{
+				newV(1, 4, 0),
+			},
+		}, false},
 	}
 
 	for _, tc := range tests {
-		v, err := NewConstraint(tc.input)
+		c, err := NewConstraint(tc.input)
 		if tc.err && err == nil {
 			t.Errorf("expected but did not get error for: %s", tc.input)
 			continue
@@ -124,16 +293,8 @@
 			continue
 		}
 
-		l := len(v.constraints)
-		if tc.ors != l {
-			t.Errorf("Expected %s to have %d ORs but got %d",
-				tc.input, tc.ors, l)
-		}
-
-		l = len(v.constraints[0])
-		if tc.count != l {
-			t.Errorf("Expected %s to have %d constraints but got %d",
-				tc.input, tc.count, l)
+		if !constraintEq(tc.c, c) {
+			t.Errorf("%q produced constraint %q, but expected %q", tc.input, c, tc.c)
 		}
 	}
 }
@@ -145,7 +306,9 @@
 		check      bool
 	}{
 		{"*", "1.2.3", true},
-		{"~0.0.0", "1.2.3", true},
+		{"~0.0.0", "1.2.3", false}, // npm allows this weird thing, but we don't
+		{"~0.0.0", "0.1.9", false},
+		{"~0.0.0", "0.0.9", true},
 		{"= 2.0", "1.2.3", false},
 		{"= 2.0", "2.0.0", true},
 		{"4.1", "4.1.0", true},
@@ -162,10 +325,12 @@
 		{"<1.1", "0.1.0", true},
 		{"<1.1", "1.1.0", false},
 		{"<1.1", "1.1.1", false},
-		{"<1.x", "1.1.1", true},
+		{"<1.x", "1.1.1", false},
+		{"<1.x", "0.9.1", true},
 		{"<1.x", "2.1.1", false},
 		{"<1.1.x", "1.2.1", false},
-		{"<1.1.x", "1.1.500", true},
+		{"<1.1.x", "1.1.500", false},
+		{"<1.1.x", "1.0.500", true},
 		{"<1.2.x", "1.1.1", true},
 		{">=1.1", "4.1.0", true},
 		{">=1.1", "1.1.0", true},
@@ -215,9 +380,52 @@
 			continue
 		}
 
-		a := c.Check(v)
+		a := c.Matches(v) == nil
 		if a != tc.check {
-			t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version)
+			if a {
+				t.Errorf("Input %q produced constraint %q; should not have admitted %q, but did", tc.constraint, c, tc.version)
+			} else {
+				t.Errorf("Input %q produced constraint %q; should have admitted %q, but did not", tc.constraint, c, tc.version)
+			}
+		}
+	}
+}
+
+func TestBidirectionalSerialization(t *testing.T) {
+	tests := []struct {
+		io string
+		eq bool
+	}{
+		{"*", true},         // any
+		{"~0.0.0", false},   // tildes expand into ranges
+		{"^2.0", false},     // carets expand into ranges
+		{"=2.0", false},     // abbreviated versions print as full
+		{"4.1.x", false},    // wildcards expand into ranges
+		{">= 1.1.0", false}, // does not produce spaces on ranges
+		{"4.1.0", true},
+		{"!=4.1.0", true},
+		{">=1.1.0", true},
+		{">=1.1.0, <2.0.0", true},
+		{">1.0.0, <=1.1.0", true},
+		{"<=1.1.0", true},
+		{">=1.1.0, <2.0.0, !=1.2.3", true},
+		{">=1.1.0, <2.0.0, !=1.2.3 || >3.0.0", true},
+		{">=1.1.0, <2.0.0, !=1.2.3 || >=3.0.0", true},
+	}
+
+	for _, fix := range tests {
+		c, err := NewConstraint(fix.io)
+		if err != nil {
+			t.Errorf("Valid constraint string produced unexpected error: %s", err)
+		}
+
+		eq := fix.io == c.String()
+		if eq != fix.eq {
+			if eq {
+				t.Errorf("Constraint %q should not have reproduced input string %q, but did", c, fix.io)
+			} else {
+				t.Errorf("Constraint should have reproduced input string %q, but instead produced %q", fix.io, c)
+			}
 		}
 	}
 }
@@ -261,168 +469,119 @@
 	}
 }
 
-func TestConstraintsValidate(t *testing.T) {
-	tests := []struct {
-		constraint string
-		version    string
-		check      bool
-	}{
-		{"*", "1.2.3", true},
-		{"~0.0.0", "1.2.3", true},
-		{"= 2.0", "1.2.3", false},
-		{"= 2.0", "2.0.0", true},
-		{"4.1", "4.1.0", true},
-		{"4.1.x", "4.1.3", true},
-		{"1.x", "1.4", true},
-		{"!=4.1", "4.1.0", false},
-		{"!=4.1", "5.1.0", true},
-		{"!=4.x", "5.1.0", true},
-		{"!=4.x", "4.1.0", false},
-		{"!=4.1.x", "4.2.0", true},
-		{"!=4.2.x", "4.2.3", false},
-		{">1.1", "4.1.0", true},
-		{">1.1", "1.1.0", false},
-		{"<1.1", "0.1.0", true},
-		{"<1.1", "1.1.0", false},
-		{"<1.1", "1.1.1", false},
-		{"<1.x", "1.1.1", true},
-		{"<1.x", "2.1.1", false},
-		{"<1.1.x", "1.2.1", false},
-		{"<1.1.x", "1.1.500", true},
-		{"<1.2.x", "1.1.1", true},
-		{">=1.1", "4.1.0", true},
-		{">=1.1", "1.1.0", true},
-		{">=1.1", "0.0.9", false},
-		{"<=1.1", "0.1.0", true},
-		{"<=1.1", "1.1.0", true},
-		{"<=1.x", "1.1.0", true},
-		{"<=2.x", "3.1.0", false},
-		{"<=1.1", "1.1.1", false},
-		{"<=1.1.x", "1.2.500", false},
-		{">1.1, <2", "1.1.1", true},
-		{">1.1, <3", "4.3.2", false},
-		{">=1.1, <2, !=1.2.3", "1.2.3", false},
-		{">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true},
-		{">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true},
-		{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", false},
-		{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false},
-		{"1.1 - 2", "1.1.1", true},
-		{"1.1-3", "4.3.2", false},
-		{"^1.1", "1.1.1", true},
-		{"^1.1", "4.3.2", false},
-		{"^1.x", "1.1.1", true},
-		{"^2.x", "1.1.1", false},
-		{"^1.x", "2.1.1", false},
-		{"~*", "2.1.1", true},
-		{"~1.x", "2.1.1", false},
-		{"~1.x", "1.3.5", true},
-		{"~1.x", "1.4", true},
-		{"~1.1", "1.1.1", true},
-		{"~1.2.3", "1.2.5", true},
-		{"~1.2.3", "1.2.2", false},
-		{"~1.2.3", "1.3.2", false},
-		{"~1.1", "1.2.3", false},
-		{"~1.3", "2.4.5", false},
+func TestUnionErr(t *testing.T) {
+	u1 := Union(
+		rangeConstraint{
+			min:        newV(3, 0, 0),
+			max:        newV(4, 0, 0),
+			includeMin: true,
+			includeMax: true,
+		},
+		rangeConstraint{
+			min:        newV(1, 0, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+		},
+	)
+	fail := u1.Matches(newV(2, 5, 0))
+	failstr := `2.5.0 is greater than or equal to the maximum of >=1.0.0, <2.0.0
+2.5.0 is less than the minimum of >=3.0.0, <=4.0.0`
+	if fail.Error() != failstr {
+		t.Errorf("Did not get expected failure message from union, got %q", fail)
+	}
+}
+
+func TestIsSuperset(t *testing.T) {
+	rc := []rangeConstraint{
+		rangeConstraint{
+			min:        newV(1, 2, 0),
+			max:        newV(2, 0, 0),
+			includeMin: true,
+		},
+		rangeConstraint{
+			min: newV(1, 2, 0),
+			max: newV(2, 1, 0),
+		},
+		rangeConstraint{
+			max: newV(1, 10, 0),
+		},
+		rangeConstraint{
+			min: newV(2, 0, 0),
+		},
+		rangeConstraint{
+			min:        newV(1, 2, 0),
+			max:        newV(2, 0, 0),
+			includeMax: true,
+		},
 	}
 
-	for _, tc := range tests {
-		c, err := NewConstraint(tc.constraint)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
+	for _, c := range rc {
+
+		// Superset comparison is not strict, so a range should always be a superset
+		// of itself.
+		if !c.isSupersetOf(c) {
+			t.Errorf("Ranges should be supersets of themselves; %s indicated it was not", c)
 		}
+	}
 
-		v, err := NewVersion(tc.version)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
+	pairs := []struct{ l, r rangeConstraint }{
+		{
+			// ensures lte is handled correctly (min side)
+			l: rc[0],
+			r: rc[1],
+		},
+		{
+			// ensures nil on min side works well
+			l: rc[0],
+			r: rc[2],
+		},
+		{
+			// ensures nil on max side works well
+			l: rc[0],
+			r: rc[3],
+		},
+		{
+			// ensures nils on both sides work well
+			l: rc[2],
+			r: rc[3],
+		},
+		{
+			// ensures gte is handled correctly (max side)
+			l: rc[2],
+			r: rc[4],
+		},
+	}
+
+	for _, p := range pairs {
+		if p.l.isSupersetOf(p.r) {
+			t.Errorf("%s is not a superset of %s", p.l, p.r)
 		}
-
-		a, msgs := c.Validate(v)
-		if a != tc.check {
-			t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version)
-		} else if a == false && len(msgs) == 0 {
-			t.Errorf("%q failed with %q but no errors returned", tc.constraint, tc.version)
+		if p.r.isSupersetOf(p.l) {
+			t.Errorf("%s is not a superset of %s", p.r, p.l)
 		}
-
-		// if a == false {
-		// 	for _, m := range msgs {
-		// 		t.Errorf("%s", m)
-		// 	}
-		// }
 	}
 
-	v, err := NewVersion("1.2.3")
-	if err != nil {
-		t.Errorf("err: %s", err)
+	rc[1].max.minor = 0
+
+	if !rc[0].isSupersetOf(rc[1]) {
+		t.Errorf("%s is a superset of %s", rc[0], rc[1])
+	}
+	rc[1].includeMax = true
+	if rc[1].isSupersetOf(rc[0]) {
+		t.Errorf("%s is not a superset of %s", rc[1], rc[0])
+	}
+	rc[0].includeMin = false
+	if !rc[1].isSupersetOf(rc[0]) {
+		t.Errorf("%s is a superset of %s", rc[1], rc[0])
 	}
 
-	c, err := NewConstraint("!= 1.2.5, ^2, <= 1.1.x")
-	if err != nil {
-		t.Errorf("err: %s", err)
+	// isSupersetOf ignores excludes, so even though this would make rc[1] not a
+	// superset of rc[0] anymore, it should still say it is.
+	rc[1].excl = []*Version{
+		newV(1, 5, 0),
 	}
 
-	_, msgs := c.Validate(v)
-	if len(msgs) != 2 {
-		t.Error("Invalid number of validations found")
-	}
-	e := msgs[0].Error()
-	if e != "1.2.3 does not have same major version as 2" {
-		t.Error("Did not get expected message: 1.2.3 does not have same major version as 2")
-	}
-	e = msgs[1].Error()
-	if e != "1.2.3 is greater than 1.1.x" {
-		t.Error("Did not get expected message: 1.2.3 is greater than 1.1.x")
-	}
-
-	tests2 := []struct {
-		constraint, version, msg string
-	}{
-		{"= 2.0", "1.2.3", "1.2.3 is not equal to 2.0"},
-		{"!=4.1", "4.1.0", "4.1.0 is equal to 4.1"},
-		{"!=4.x", "4.1.0", "4.1.0 is equal to 4.x"},
-		{"!=4.2.x", "4.2.3", "4.2.3 is equal to 4.2.x"},
-		{">1.1", "1.1.0", "1.1.0 is less than or equal to 1.1"},
-		{"<1.1", "1.1.0", "1.1.0 is greater than or equal to 1.1"},
-		{"<1.1", "1.1.1", "1.1.1 is greater than or equal to 1.1"},
-		{"<1.x", "2.1.1", "2.1.1 is greater than or equal to 1.x"},
-		{"<1.1.x", "1.2.1", "1.2.1 is greater than or equal to 1.1.x"},
-		{">=1.1", "0.0.9", "0.0.9 is less than 1.1"},
-		{"<=2.x", "3.1.0", "3.1.0 is greater than 2.x"},
-		{"<=1.1", "1.1.1", "1.1.1 is greater than 1.1"},
-		{"<=1.1.x", "1.2.500", "1.2.500 is greater than 1.1.x"},
-		{">1.1, <3", "4.3.2", "4.3.2 is greater than or equal to 3"},
-		{">=1.1, <2, !=1.2.3", "1.2.3", "1.2.3 is equal to 1.2.3"},
-		{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", "3.0.0 is greater than or equal to 2"},
-		{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", "1.2.3 is equal to 1.2.3"},
-		{"1.1-3", "4.3.2", "4.3.2 is greater than 3"},
-		{"^1.1", "4.3.2", "4.3.2 does not have same major version as 1.1"},
-		{"^2.x", "1.1.1", "1.1.1 does not have same major version as 2.x"},
-		{"^1.x", "2.1.1", "2.1.1 does not have same major version as 1.x"},
-		{"~1.x", "2.1.1", "2.1.1 does not have same major and minor version as 1.x"},
-		{"~1.2.3", "1.2.2", "1.2.2 does not have same major and minor version as 1.2.3"},
-		{"~1.2.3", "1.3.2", "1.3.2 does not have same major and minor version as 1.2.3"},
-		{"~1.1", "1.2.3", "1.2.3 does not have same major and minor version as 1.1"},
-		{"~1.3", "2.4.5", "2.4.5 does not have same major and minor version as 1.3"},
-	}
-
-	for _, tc := range tests2 {
-		c, err := NewConstraint(tc.constraint)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
-		}
-
-		v, err := NewVersion(tc.version)
-		if err != nil {
-			t.Errorf("err: %s", err)
-			continue
-		}
-
-		_, msgs := c.Validate(v)
-		e := msgs[0].Error()
-		if e != tc.msg {
-			t.Errorf("Did not get expected message %q: %s", tc.msg, e)
-		}
+	if !rc[1].isSupersetOf(rc[0]) {
+		t.Errorf("%s is still a superset of %s, because isSupersetOf is supposed to ignore excluded versions", rc[1], rc[0])
 	}
 }
diff --git a/vendor/github.com/Masterminds/semver/error.go b/vendor/github.com/Masterminds/semver/error.go
new file mode 100644
index 0000000..4fb7345
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/error.go
@@ -0,0 +1,69 @@
+package semver
+
+import (
+	"bytes"
+	"fmt"
+)
+
+var rangeErrs = [...]string{
+	"%s is less than the minimum of %s",
+	"%s is less than or equal to the minimum of %s",
+	"%s is greater than the maximum of %s",
+	"%s is greater than or equal to the maximum of %s",
+	"%s is specifically disallowed in %s",
+}
+
+const (
+	rerrLT = iota
+	rerrLTE
+	rerrGT
+	rerrGTE
+	rerrNE
+)
+
+type MatchFailure interface {
+	error
+	Pair() (v *Version, c Constraint)
+}
+
+type RangeMatchFailure struct {
+	v   *Version
+	rc  rangeConstraint
+	typ int8
+}
+
+func (rce RangeMatchFailure) Error() string {
+	return fmt.Sprintf(rangeErrs[rce.typ], rce.v, rce.rc)
+}
+
+func (rce RangeMatchFailure) Pair() (v *Version, r Constraint) {
+	return rce.v, rce.rc
+}
+
+type VersionMatchFailure struct {
+	v, other *Version
+}
+
+func (vce VersionMatchFailure) Error() string {
+	return fmt.Sprintf("%s is not equal to %s", vce.v, vce.other)
+}
+
+func (vce VersionMatchFailure) Pair() (v *Version, r Constraint) {
+	return vce.v, vce.other
+}
+
+type MultiMatchFailure []MatchFailure
+
+func (mmf MultiMatchFailure) Error() string {
+	var buf bytes.Buffer
+
+	for k, e := range mmf {
+		if k < len(mmf)-1 {
+			fmt.Fprintf(&buf, "%s\n", e)
+		} else {
+			fmt.Fprintf(&buf, "%s", e.Error())
+		}
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/Masterminds/semver/magic.go b/vendor/github.com/Masterminds/semver/magic.go
new file mode 100644
index 0000000..9a8d353
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/magic.go
@@ -0,0 +1,99 @@
+package semver
+
+import "errors"
+
+var noneErr = errors.New("The 'None' constraint admits no versions.")
+
+// Any is a constraint that is satisfied by any valid semantic version.
+type any struct{}
+
+// Any creates a constraint that will match any version.
+func Any() Constraint {
+	return any{}
+}
+
+func (any) String() string {
+	return "*"
+}
+
+// Matches checks that a version satisfies the constraint. As all versions
+// satisfy Any, this always returns nil.
+func (any) Matches(v *Version) error {
+	return nil
+}
+
+// Intersect computes the intersection between two constraints.
+//
+// As Any is the set of all possible versions, any intersection with that
+// infinite set will necessarily be the entirety of the second set. Thus, this
+// simply returns the passed constraint.
+func (any) Intersect(c Constraint) Constraint {
+	return c
+}
+
+// MatchesAny indicates whether there exists any version that can satisfy both
+// this constraint, and the passed constraint. As all versions
+// satisfy Any, this is always true - unless none is passed.
+func (any) MatchesAny(c Constraint) bool {
+	if _, ok := c.(none); ok {
+		return false
+	}
+	return true
+}
+
+func (any) Union(c Constraint) Constraint {
+	return Any()
+}
+
+func (any) _private() {}
+
+// None is an unsatisfiable constraint - it represents the empty set.
+type none struct{}
+
+// None creates a constraint that matches no versions (the empty set).
+func None() Constraint {
+	return none{}
+}
+
+func (none) String() string {
+	return ""
+}
+
+// Matches checks that a version satisfies the constraint. As no version can
+// satisfy None, this always fails (returns an error).
+func (none) Matches(v *Version) error {
+	return noneErr
+}
+
+// Intersect computes the intersection between two constraints.
+//
+// None is the empty set of versions, and any intersection with the empty set is
+// necessarily the empty set. Thus, this always returns None.
+func (none) Intersect(Constraint) Constraint {
+	return None()
+}
+
+func (none) Union(c Constraint) Constraint {
+	return c
+}
+
+// MatchesAny indicates whether there exists any version that can satisfy the
+// constraint. As no versions satisfy None, this is always false.
+func (none) MatchesAny(c Constraint) bool {
+	return false
+}
+
+func (none) _private() {}
+
+// IsNone indicates if a constraint will match no versions - that is, the
+// constraint represents the empty set.
+func IsNone(c Constraint) bool {
+	_, ok := c.(none)
+	return ok
+}
+
+// IsAny indicates if a constraint will match any and all versions.
+func IsAny(c Constraint) bool {
+	_, ok := c.(any)
+	return ok
+}
diff --git a/vendor/github.com/Masterminds/semver/parse.go b/vendor/github.com/Masterminds/semver/parse.go
new file mode 100644
index 0000000..a6e6a97
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/parse.go
@@ -0,0 +1,217 @@
+package semver
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+func rewriteRange(i string) string {
+	m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
+	if m == nil {
+		return i
+	}
+	o := i
+	for _, v := range m {
+		t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
+		o = strings.Replace(o, v[0], t, 1)
+	}
+
+	return o
+}
+
+func parseConstraint(c string) (Constraint, error) {
+	m := constraintRegex.FindStringSubmatch(c)
+	if m == nil {
+		return nil, fmt.Errorf("Malformed constraint: %s", c)
+	}
+
+	// Handle the full wildcard case first - easy!
+	if isX(m[3]) {
+		return any{}, nil
+	}
+
+	ver := m[2]
+	var wildPatch, wildMinor bool
+	if isX(strings.TrimPrefix(m[4], ".")) {
+		wildPatch = true
+		wildMinor = true
+		ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
+	} else if isX(strings.TrimPrefix(m[5], ".")) {
+		wildPatch = true
+		ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
+	}
+
+	v, err := NewVersion(ver)
+	if err != nil {
+		// The constraintRegex should catch any regex parsing errors. So,
+		// we should never get here.
+		return nil, errors.New("constraint Parser Error")
+	}
+
+	switch m[1] {
+	case "^":
+		// Caret always expands to a range
+		return expandCaret(v), nil
+	case "~":
+		// Tilde always expands to a range
+		return expandTilde(v, wildMinor), nil
+	case "!=":
+		// Not equals expands to a range if no element isX(); otherwise expands
+		// to a union of ranges
+		return expandNeq(v, wildMinor, wildPatch), nil
+	case "", "=":
+		if wildPatch || wildMinor {
+			// Equalling a wildcard has the same behavior as expanding tilde
+			return expandTilde(v, wildMinor), nil
+		}
+		return v, nil
+	case ">":
+		return expandGreater(v, wildMinor, wildPatch, false), nil
+	case ">=", "=>":
+		return expandGreater(v, wildMinor, wildPatch, true), nil
+	case "<":
+		return expandLess(v, wildMinor, wildPatch, false), nil
+	case "<=", "=<":
+		return expandLess(v, wildMinor, wildPatch, true), nil
+	default:
+		// Shouldn't be possible to get here, unless the regex is allowing
+		// predicate we don't know about...
+		return nil, fmt.Errorf("Unrecognized predicate %q", m[1])
+	}
+}
+
+func expandCaret(v *Version) Constraint {
+	maxv := &Version{
+		major: v.major + 1,
+		minor: 0,
+		patch: 0,
+	}
+
+	return rangeConstraint{
+		min:        v,
+		max:        maxv,
+		includeMin: true,
+		includeMax: false,
+	}
+}
+
+func expandTilde(v *Version, wildMinor bool) Constraint {
+	if wildMinor {
+		// When minor is wild on a tilde, behavior is same as caret
+		return expandCaret(v)
+	}
+
+	maxv := &Version{
+		major: v.major,
+		minor: v.minor + 1,
+		patch: 0,
+	}
+
+	return rangeConstraint{
+		min:        v,
+		max:        maxv,
+		includeMin: true,
+		includeMax: false,
+	}
+}
+
+// expandNeq expands a "not-equals" constraint.
+//
+// If the constraint has any wildcards, it will expand into a unionConstraint
+// (which is how we represent a disjoint set). If there are no wildcards, it
+// will expand to a rangeConstraint with no min or max, but having the one
+// exception.
+func expandNeq(v *Version, wildMinor, wildPatch bool) Constraint {
+	if !(wildMinor || wildPatch) {
+		return rangeConstraint{
+			excl: []*Version{v},
+		}
+	}
+
+	// Create the low range with no min, and the max as the floor admitted by
+	// the wildcard
+	lr := rangeConstraint{
+		max:        v,
+		includeMax: false,
+	}
+
+	// The high range uses the derived version (bumped depending on where the
+	// wildcards were) as the min, and is inclusive
+	minv := &Version{
+		major: v.major,
+		minor: v.minor,
+		patch: v.patch,
+	}
+
+	if wildMinor {
+		minv.major++
+	} else {
+		minv.minor++
+	}
+
+	hr := rangeConstraint{
+		min:        minv,
+		includeMin: true,
+	}
+
+	return Union(lr, hr)
+}
+
+func expandGreater(v *Version, wildMinor, wildPatch, eq bool) Constraint {
+	if (wildMinor || wildPatch) && !eq {
+		// wildcards negate the meaning of prerelease and other info
+		v = &Version{
+			major: v.major,
+			minor: v.minor,
+			patch: v.patch,
+		}
+
+		// Not equal but with wildcards is the weird case - we have to bump up
+		// the next version AND make it equal
+		if wildMinor {
+			v.major++
+		} else {
+			v.minor++
+		}
+		return rangeConstraint{
+			min:        v,
+			includeMin: true,
+		}
+	}
+
+	return rangeConstraint{
+		min:        v,
+		includeMin: eq,
+	}
+}
+
+func expandLess(v *Version, wildMinor, wildPatch, eq bool) Constraint {
+	if eq && (wildMinor || wildPatch) {
+		// wildcards negate the meaning of prerelease and other info
+		v = &Version{
+			major: v.major,
+			minor: v.minor,
+			patch: v.patch,
+		}
+		if wildMinor {
+			v.major++
+		} else if wildPatch {
+			v.minor++
+		}
+		return rangeConstraint{
+			max:        v,
+			includeMax: false,
+		}
+	}
+
+	return rangeConstraint{
+		max:        v,
+		includeMax: eq,
+	}
+}
+
+func isX(x string) bool {
+	l := strings.ToLower(x)
+	return l == "x" || l == "*"
+}
diff --git a/vendor/github.com/Masterminds/semver/range.go b/vendor/github.com/Masterminds/semver/range.go
new file mode 100644
index 0000000..0e0c6a8
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/range.go
@@ -0,0 +1,452 @@
+package semver
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type rangeConstraint struct {
+	min, max               *Version
+	includeMin, includeMax bool
+	excl                   []*Version
+}
+
+func (rc rangeConstraint) Matches(v *Version) error {
+	var fail bool
+
+	rce := RangeMatchFailure{
+		v:  v,
+		rc: rc,
+	}
+
+	if rc.min != nil {
+		// TODO ensure sane handling of prerelease versions (which are strictly
+		// less than the normal version, but should be admitted in a geq range)
+		cmp := rc.min.Compare(v)
+		if rc.includeMin {
+			rce.typ = rerrLT
+			fail = cmp == 1
+		} else {
+			rce.typ = rerrLTE
+			fail = cmp != -1
+		}
+
+		if fail {
+			return rce
+		}
+	}
+
+	if rc.max != nil {
+		// TODO ensure sane handling of prerelease versions (which are strictly
+		// less than the normal version, but should be admitted in a geq range)
+		cmp := rc.max.Compare(v)
+		if rc.includeMax {
+			rce.typ = rerrGT
+			fail = cmp == -1
+		} else {
+			rce.typ = rerrGTE
+			fail = cmp != 1
+		}
+
+		if fail {
+			return rce
+		}
+	}
+
+	for _, excl := range rc.excl {
+		if excl.Equal(v) {
+			rce.typ = rerrNE
+			return rce
+		}
+	}
+
+	return nil
+}
+
+func (rc rangeConstraint) dup() rangeConstraint {
+	// Only need to do anything if there are some excludes
+	if len(rc.excl) == 0 {
+		return rc
+	}
+
+	var excl []*Version
+	excl = make([]*Version, len(rc.excl))
+	copy(excl, rc.excl)
+
+	return rangeConstraint{
+		min:        rc.min,
+		max:        rc.max,
+		includeMin: rc.includeMin,
+		includeMax: rc.includeMax,
+		excl:       excl,
+	}
+}
+
+func (rc rangeConstraint) Intersect(c Constraint) Constraint {
+	switch oc := c.(type) {
+	case any:
+		return rc
+	case none:
+		return None()
+	case unionConstraint:
+		return oc.Intersect(rc)
+	case *Version:
+		if err := rc.Matches(oc); err != nil {
+			return None()
+		} else {
+			return c
+		}
+	case rangeConstraint:
+		nr := rangeConstraint{
+			min:        rc.min,
+			max:        rc.max,
+			includeMin: rc.includeMin,
+			includeMax: rc.includeMax,
+		}
+
+		if oc.min != nil {
+			if nr.min == nil || nr.min.LessThan(oc.min) {
+				nr.min = oc.min
+				nr.includeMin = oc.includeMin
+			} else if oc.min.Equal(nr.min) && !oc.includeMin {
+				// intersection means we must follow the least inclusive
+				nr.includeMin = false
+			}
+		}
+
+		if oc.max != nil {
+			if nr.max == nil || nr.max.GreaterThan(oc.max) {
+				nr.max = oc.max
+				nr.includeMax = oc.includeMax
+			} else if oc.max.Equal(nr.max) && !oc.includeMax {
+				// intersection means we must follow the least inclusive
+				nr.includeMax = false
+			}
+		}
+
+		// Ensure any applicable excls from oc are included in nc
+		for _, e := range append(rc.excl, oc.excl...) {
+			if nr.Matches(e) == nil {
+				nr.excl = append(nr.excl, e)
+			}
+		}
+
+		if nr.min == nil || nr.max == nil {
+			return nr
+		}
+
+		if nr.min.Equal(nr.max) {
+			// min and max are equal. if range is inclusive, return that
+			// version; otherwise, none
+			if nr.includeMin && nr.includeMax {
+				return nr.min
+			}
+			return None()
+		}
+
+		if nr.min.GreaterThan(nr.max) {
+			// min is greater than max - not possible, so we return none
+			return None()
+		}
+
+		// range now fully validated, return what we have
+		return nr
+
+	default:
+		panic("unknown type")
+	}
+}
+
+func (rc rangeConstraint) Union(c Constraint) Constraint {
+	switch oc := c.(type) {
+	case any:
+		return Any()
+	case none:
+		return rc
+	case unionConstraint:
+		return Union(rc, oc)
+	case *Version:
+		if err := rc.Matches(oc); err == nil {
+			return rc
+		} else if len(rc.excl) > 0 { // TODO (re)checking like this is wasteful
+			// ensure we don't have an excl-specific mismatch; if we do, remove
+			// it and return that
+			for k, e := range rc.excl {
+				if e.Equal(oc) {
+					excl := make([]*Version, len(rc.excl)-1)
+
+					// Copy in two steps rather than via append:
+					// append(rc.excl[:k], rc.excl[k+1:]...) would clobber
+					// the backing array shared with the receiver's excl.
+					copy(excl, rc.excl[:k])
+					copy(excl[k:], rc.excl[k+1:])
+
+					return rangeConstraint{
+						min:        rc.min,
+						max:        rc.max,
+						includeMin: rc.includeMin,
+						includeMax: rc.includeMax,
+						excl:       excl,
+					}
+				}
+			}
+		}
+
+		if oc.LessThan(rc.min) {
+			return unionConstraint{oc, rc.dup()}
+		}
+		if areEq(oc, rc.min) {
+			ret := rc.dup()
+			ret.includeMin = true
+			return ret
+		}
+		if areEq(oc, rc.max) {
+			ret := rc.dup()
+			ret.includeMax = true
+			return ret
+		}
+		// Only possibility left is gt
+		return unionConstraint{rc.dup(), oc}
+	case rangeConstraint:
+		if (rc.min == nil && oc.max == nil) || (rc.max == nil && oc.min == nil) {
+			rcl, ocl := len(rc.excl), len(oc.excl)
+			// Quick check for open case
+			if rcl == 0 && ocl == 0 {
+				return Any()
+			}
+
+			// This is inefficient, but it's such an absurdly corner case...
+			if len(dedupeExcls(rc.excl, oc.excl)) == rcl+ocl {
+				// If deduped excludes are the same length as the individual
+				// excludes, then they have no overlapping elements, so the
+				// union knocks out the excludes and we're back to Any.
+				return Any()
+			}
+
+			// There's at least some dupes, which are all we need to include
+			nc := rangeConstraint{}
+			for _, e1 := range rc.excl {
+				for _, e2 := range oc.excl {
+					if e1.Equal(e2) {
+						nc.excl = append(nc.excl, e1)
+					}
+				}
+			}
+
+			return nc
+		} else if areAdjacent(rc, oc) {
+			// Receiver adjoins the input from below
+			nc := rc.dup()
+
+			nc.max = oc.max
+			nc.includeMax = oc.includeMax
+			nc.excl = append(nc.excl, oc.excl...)
+
+			return nc
+		} else if areAdjacent(oc, rc) {
+			// Input adjoins the receiver from below
+			nc := oc.dup()
+
+			nc.max = rc.max
+			nc.includeMax = rc.includeMax
+			nc.excl = append(nc.excl, rc.excl...)
+
+			return nc
+
+		} else if rc.MatchesAny(oc) {
+			// Receiver and input overlap; form a new range accordingly.
+			nc := rangeConstraint{}
+
+			// For efficiency, we simultaneously determine if either of the
+			// ranges are supersets of the other, while also selecting the min
+			// and max of the new range
+			var info uint8
+
+			const (
+				lminlt uint8             = 1 << iota // left (rc) min less than right
+				rminlt                               // right (oc) min less than left
+				lmaxgt                               // left max greater than right
+				rmaxgt                               // right max greater than left
+				lsupr  = lminlt | lmaxgt             // left is superset of right
+				rsupl  = rminlt | rmaxgt             // right is superset of left
+			)
+
+			// Pick the min
+			if rc.min != nil {
+				if oc.min == nil || rc.min.GreaterThan(oc.min) || (rc.min.Equal(oc.min) && !rc.includeMin && oc.includeMin) {
+					info |= rminlt
+					nc.min = oc.min
+					nc.includeMin = oc.includeMin
+				} else {
+					info |= lminlt
+					nc.min = rc.min
+					nc.includeMin = rc.includeMin
+				}
+			} else if oc.min != nil {
+				info |= lminlt
+				nc.min = rc.min
+				nc.includeMin = rc.includeMin
+			}
+
+			// Pick the max
+			if rc.max != nil {
+				if oc.max == nil || rc.max.LessThan(oc.max) || (rc.max.Equal(oc.max) && !rc.includeMax && oc.includeMax) {
+					info |= rmaxgt
+					nc.max = oc.max
+					nc.includeMax = oc.includeMax
+				} else {
+					info |= lmaxgt
+					nc.max = rc.max
+					nc.includeMax = rc.includeMax
+				}
+			} else if oc.max != nil {
+				info |= lmaxgt
+				nc.max = rc.max
+				nc.includeMax = rc.includeMax
+			}
+
+			// Reincorporate any excluded versions
+			if info&lsupr != lsupr {
+				// rc is not superset of oc, so must walk oc.excl
+				for _, e := range oc.excl {
+					if rc.Matches(e) != nil {
+						nc.excl = append(nc.excl, e)
+					}
+				}
+			}
+
+			if info&rsupl != rsupl {
+				// oc is not superset of rc, so must walk rc.excl
+				for _, e := range rc.excl {
+					if oc.Matches(e) != nil {
+						nc.excl = append(nc.excl, e)
+					}
+				}
+			}
+
+			return nc
+		} else {
+			// Don't call Union() here b/c it would duplicate work
+			uc := constraintList{rc, oc}
+			sort.Sort(uc)
+			return unionConstraint(uc)
+		}
+	}
+
+	panic("unknown type")
+}
+
+// isSupersetOf computes whether the receiver rangeConstraint is a superset of
+// the passed rangeConstraint.
+//
+// This is NOT a strict superset comparison, so identical ranges will both
+// report being supersets of each other.
+//
+// Note also that this does *not* compare excluded versions - it only compares
+// range endpoints.
+func (rc rangeConstraint) isSupersetOf(rc2 rangeConstraint) bool {
+	if rc.min != nil {
+		if rc2.min == nil || rc.min.GreaterThan(rc2.min) || (rc.min.Equal(rc2.min) && !rc.includeMin && rc2.includeMin) {
+			return false
+		}
+	}
+
+	if rc.max != nil {
+		if rc2.max == nil || rc.max.LessThan(rc2.max) || (rc.max.Equal(rc2.max) && !rc.includeMax && rc2.includeMax) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (rc rangeConstraint) String() string {
+	// TODO express using caret or tilde, where applicable
+	var pieces []string
+	if rc.min != nil {
+		if rc.includeMin {
+			pieces = append(pieces, fmt.Sprintf(">=%s", rc.min))
+		} else {
+			pieces = append(pieces, fmt.Sprintf(">%s", rc.min))
+		}
+	}
+
+	if rc.max != nil {
+		if rc.includeMax {
+			pieces = append(pieces, fmt.Sprintf("<=%s", rc.max))
+		} else {
+			pieces = append(pieces, fmt.Sprintf("<%s", rc.max))
+		}
+	}
+
+	for _, e := range rc.excl {
+		pieces = append(pieces, fmt.Sprintf("!=%s", e))
+	}
+
+	return strings.Join(pieces, ", ")
+}
+
+// areAdjacent tests two constraints to determine if they are adjacent,
+// but non-overlapping.
+//
+// If either constraint is not a range, returns false. We still allow it at the
+// type level, however, to make the check convenient elsewhere.
+//
+// Assumes the first range is less than the second; it is incumbent on the
+// caller to arrange the inputs appropriately.
+func areAdjacent(c1, c2 Constraint) bool {
+	var rc1, rc2 rangeConstraint
+	var ok bool
+	if rc1, ok = c1.(rangeConstraint); !ok {
+		return false
+	}
+	if rc2, ok = c2.(rangeConstraint); !ok {
+		return false
+	}
+
+	if !areEq(rc1.max, rc2.min) {
+		return false
+	}
+
+	return (rc1.includeMax && !rc2.includeMin) ||
+		(!rc1.includeMax && rc2.includeMin)
+}
+
+func (rc rangeConstraint) MatchesAny(c Constraint) bool {
+	if _, ok := rc.Intersect(c).(none); ok {
+		return false
+	}
+	return true
+}
+
+func dedupeExcls(ex1, ex2 []*Version) []*Version {
+	// TODO stupid inefficient, but these are really only ever going to be
+	// small, so not worth optimizing right now
+	var ret []*Version
+oloop:
+	for _, e1 := range ex1 {
+		for _, e2 := range ex2 {
+			if e1.Equal(e2) {
+				continue oloop
+			}
+		}
+		ret = append(ret, e1)
+	}
+
+	return append(ret, ex2...)
+}
+
+func (rangeConstraint) _private() {}
+func (rangeConstraint) _real()    {}
+
+func areEq(v1, v2 *Version) bool {
+	if v1 == nil && v2 == nil {
+		return true
+	}
+
+	if v1 != nil && v2 != nil {
+		return v1.Equal(v2)
+	}
+	return false
+}
diff --git a/vendor/github.com/Masterminds/semver/set_ops_test.go b/vendor/github.com/Masterminds/semver/set_ops_test.go
new file mode 100644
index 0000000..363e848
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/set_ops_test.go
@@ -0,0 +1,914 @@
+package semver
+
+import "testing"
+
+func TestIntersection(t *testing.T) {
+	var actual Constraint
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = Intersection(); !IsNone(actual) {
+		t.Errorf("Intersection of nothing should always produce None; got %q", actual)
+	}
+
+	if actual = Intersection(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Intersection of one item should always return that item; got %q", actual) // was missing the %q argument (vet: %!q(MISSING))
+	}
+
+	if actual = Intersection(rc1, None()); !IsNone(actual) {
+		t.Errorf("Intersection of anything with None should always produce None; got %q", actual)
+	}
+
+	if actual = Intersection(rc1, Any()); !constraintEq(actual, rc1) {
+		t.Errorf("Intersection of anything with Any should return self; got %q", actual)
+	}
+
+	v1 := newV(1, 5, 0)
+	if actual = Intersection(rc1, v1); !constraintEq(actual, v1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, v1)
+	}
+
+	rc2 := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+	result := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = Intersection(rc1, rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	u1 := unionConstraint{
+		rangeConstraint{
+			min: newV(1, 2, 0),
+			max: newV(3, 0, 0),
+		},
+		newV(3, 1, 0),
+	}
+
+	if actual = Intersection(u1, rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = Intersection(rc1, newV(2, 0, 5), u1); !IsNone(actual) {
+		t.Errorf("First two are disjoint, should have gotten None but got %q", actual)
+	}
+}
+
+func TestRangeIntersection(t *testing.T) {
+	var actual Constraint
+	// Test magic cases
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) {
+		t.Errorf("Intersection of anything with Any should return self; got %q", actual)
+	}
+	if actual = rc1.Intersect(None()); !IsNone(actual) {
+		t.Errorf("Intersection of anything with None should always produce None; got %q", actual)
+	}
+
+	// Test single version cases
+
+	// single v, in range
+	v1 := newV(1, 5, 0)
+
+	if actual = rc1.Intersect(v1); !constraintEq(actual, v1) {
+		t.Errorf("Intersection of version with matching range should return the version; got %q", actual)
+	}
+
+	// now exclude just that version
+	rc1.excl = []*Version{v1}
+	if actual = rc1.Intersect(v1); !IsNone(actual) {
+		t.Errorf("Intersection of version with range having specific exclude for that version should produce None; got %q", actual)
+	}
+
+	// and, of course, none if the version is out of range
+	v2 := newV(0, 5, 0)
+	if actual = rc1.Intersect(v2); !IsNone(actual) {
+		t.Errorf("Intersection of version with non-matching range should produce None; got %q", actual)
+	}
+
+	// Test basic overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+	result := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And with includes
+	rc1.includeMin = true
+	rc1.includeMax = true
+	rc2.includeMin = true
+	rc2.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Overlaps with nils
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		max: newV(2, 2, 0),
+	}
+	result = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And with includes
+	rc1.includeMin = true
+	rc2.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test superset overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	result = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Make sure irrelevant includes don't leak in
+	rc2.includeMin = true
+	rc2.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// But relevant includes get used
+	rc1.includeMin = true
+	rc1.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test disjoint case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(1, 6, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+
+	// Test disjoint at gt/lt boundary (non-adjacent)
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, None()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, None())
+	}
+
+	// Now, just have them touch at a single version
+	rc1.includeMax = true
+	rc2.includeMin = true
+
+	vresult := newV(2, 0, 0)
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, vresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, vresult)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, vresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, vresult)
+	}
+
+	// Test excludes in intersection range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	// Test excludes not in intersection range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+		excl: []*Version{
+			newV(1, 1, 0),
+		},
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	// Test min, and greater min
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min:        newV(1, 5, 0),
+		includeMin: true,
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2) // report rc2, the value compared against (was stale `result`)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Test max, and lesser max
+	rc1 = rangeConstraint{
+		max: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		max: newV(1, 5, 0),
+	}
+	result = rangeConstraint{
+		max: newV(1, 0, 0),
+	}
+
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Intersect(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Ensure pure excludes come through as they should
+	rc1 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+
+	rc2 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+			newV(1, 7, 0),
+		},
+	}
+
+	if actual = Any().Intersect(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc1.Intersect(Any()); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc1.Intersect(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// TODO test the pre-release special range stuff
+}
+
+func TestRangeUnion(t *testing.T) {
+	var actual Constraint
+	// Test magic cases
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	if actual = rc1.Union(Any()); !IsAny(actual) {
+		t.Errorf("Union of anything with Any should always produce Any; got %q", actual)
+	}
+	if actual = rc1.Union(None()); !constraintEq(actual, rc1) {
+		t.Errorf("Union of anything with None should return self; got %q", actual)
+	}
+
+	// Test single version cases
+
+	// single v, in range
+	v1 := newV(1, 5, 0)
+
+	if actual = rc1.Union(v1); !constraintEq(actual, rc1) {
+		t.Errorf("Union of version with matching range should return the range; got %q", actual)
+	}
+
+	// now exclude just that version
+	rc2 := rc1.dup()
+	rc2.excl = []*Version{v1}
+	if actual = rc2.Union(v1); !constraintEq(actual, rc1) {
+		t.Errorf("Union of version with range having specific exclude for that version should produce the range without that exclude; got %q", actual)
+	}
+
+	// and a union if the version is not within the range
+	v2 := newV(0, 5, 0)
+	uresult := unionConstraint{v2, rc1}
+	if actual = rc1.Union(v2); !constraintEq(actual, uresult) {
+		t.Errorf("Union of version with non-matching range should produce a unionConstraint with those two; got %q", actual)
+	}
+
+	// union with version at the min should ensure "oreq"
+	v2 = newV(1, 0, 0)
+	rc3 := rc1
+	rc3.includeMin = true
+
+	if actual = rc1.Union(v2); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual)
+	}
+	if actual = v2.Union(rc1); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at min end should add includeMin (%q), but got %q", rc3, actual)
+	}
+
+	// same at max end
+	v2 = newV(2, 0, 0)
+	rc3.includeMin = false
+	rc3.includeMax = true
+
+	if actual = rc1.Union(v2); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual)
+	}
+	if actual = v2.Union(rc1); !constraintEq(actual, rc3) {
+		t.Errorf("Union of range with version at max end should add includeMax (%q), but got %q", rc3, actual)
+	}
+
+	// Test basic overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+	result := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And with includes
+	rc1.includeMin = true
+	rc1.includeMax = true
+	rc2.includeMin = true
+	rc2.includeMax = true
+	result.includeMin = true
+	result.includeMax = true
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Overlaps with nils
+	rc1 = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+
+	// Just one nil in overlap
+	rc1.max = newV(2, 0, 0)
+	result = rangeConstraint{
+		max: newV(2, 2, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	rc1.max = nil
+	rc2.min = newV(1, 5, 0)
+	result = rangeConstraint{
+		min: newV(1, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test superset overlap case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Test disjoint case
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(1, 6, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	uresult = unionConstraint{rc1, rc2}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+
+	// Test disjoint at gt/lt boundary (non-adjacent)
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	uresult = unionConstraint{rc1, rc2}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, uresult) {
+		t.Errorf("Got constraint %q, but expected %q", actual, uresult)
+	}
+
+	// Now, just have them touch at a single version
+	rc1.includeMax = true
+	rc2.includeMin = true
+	result = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// And top-adjacent at that version
+	rc2.includeMin = false
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	// And bottom-adjacent at that version
+	rc1.includeMax = false
+	rc2.includeMin = true
+	if actual = rc1.Union(rc2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+
+	// Test excludes in overlapping range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Test excludes not in non-overlapping range
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 = rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(3, 0, 0),
+		excl: []*Version{
+			newV(1, 1, 0),
+		},
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc2)
+	}
+
+	// Ensure pure excludes come through as they should
+	rc1 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+		},
+	}
+
+	rc2 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 6, 0),
+			newV(1, 7, 0),
+		},
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	rc1 = rangeConstraint{
+		excl: []*Version{
+			newV(1, 5, 0),
+		},
+	}
+
+	if actual = rc1.Union(rc2); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+	if actual = rc2.Union(rc1); !constraintEq(actual, Any()) {
+		t.Errorf("Got constraint %q, but expected %q", actual, Any())
+	}
+
+	// TODO test the pre-release special range stuff
+}
+
+func TestUnionIntersection(t *testing.T) {
+	var actual Constraint
+	// magic first
+	u1 := unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+	}
+	if actual = u1.Intersect(Any()); !constraintEq(actual, u1) {
+		t.Errorf("Intersection of anything with Any should return self; got %s", actual)
+	}
+	if actual = u1.Intersect(None()); !IsNone(actual) {
+		t.Errorf("Intersection of anything with None should always produce None; got %s", actual)
+	}
+	if u1.MatchesAny(None()) {
+		t.Errorf("Can't match any when intersected with None")
+	}
+
+	// intersect of unions with single versions
+	v1 := newV(1, 1, 0)
+	if actual = u1.Intersect(v1); !constraintEq(actual, v1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, v1)
+	}
+	if actual = v1.Intersect(u1); !constraintEq(actual, v1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, v1)
+	}
+
+	// intersect of range with union of versions
+	u1 = unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+	}
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = u1.Intersect(rc1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+	if actual = rc1.Intersect(u1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+
+	u2 := unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+	}
+
+	if actual = u1.Intersect(u2); !constraintEq(actual, u2) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u2)
+	}
+
+	// Overlapping sub/supersets
+	rc1 = rangeConstraint{
+		min: newV(1, 5, 0),
+		max: newV(1, 6, 0),
+	}
+	rc2 := rangeConstraint{
+		min: newV(2, 0, 0),
+		max: newV(3, 0, 0),
+	}
+	rc3 := rangeConstraint{ // := -- first declaration of rc3 (plain `=` did not compile)
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc4 := rangeConstraint{
+		min: newV(2, 5, 0),
+		max: newV(2, 6, 0),
+	}
+	u1 = unionConstraint{rc1, rc2}
+	u2 = unionConstraint{rc3, rc4}
+	ur := unionConstraint{rc1, rc4}
+
+	if actual = u1.Intersect(u2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = u2.Intersect(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+
+	// Ensure excludes carry as they should
+	rc1.excl = []*Version{newV(1, 5, 5)}
+	u1 = unionConstraint{rc1, rc2}
+	ur = unionConstraint{rc1, rc4}
+
+	if actual = u1.Intersect(u2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = u2.Intersect(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+}
+
+func TestUnionUnion(t *testing.T) {
+	var actual Constraint
+	// magic first
+	u1 := unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+	}
+	if actual = u1.Union(Any()); !IsAny(actual) {
+		t.Errorf("Union of anything with Any should always return Any; got %s", actual)
+	}
+	if actual = u1.Union(None()); !constraintEq(actual, u1) {
+		t.Errorf("Union of anything with None should always return self; got %s", actual)
+	}
+
+	// union of uc with single versions
+	// already present
+	v1 := newV(1, 2, 0)
+	if actual = u1.Union(v1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+	if actual = v1.Union(u1); !constraintEq(actual, u1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, u1)
+	}
+
+	// not present
+	v2 := newV(1, 4, 0)
+	ur := append(u1, v2)
+	if actual = u1.Union(v2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = v2.Union(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+
+	// union of uc with uc, all versions
+	u2 := unionConstraint{
+		newV(1, 3, 0),
+		newV(1, 4, 0),
+		newV(1, 5, 0),
+	}
+	ur = unionConstraint{
+		newV(1, 1, 0),
+		newV(1, 2, 0),
+		newV(1, 3, 0),
+		newV(1, 4, 0),
+		newV(1, 5, 0),
+	}
+
+	if actual = u1.Union(u2); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = u2.Union(u1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+
+	// union that should compress versions into range
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+
+	if actual = u1.Union(rc1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+	if actual = rc1.Union(u1); !constraintEq(actual, rc1) {
+		t.Errorf("Got constraint %q, but expected %q", actual, rc1)
+	}
+
+	rc1.max = newV(1, 4, 5)
+	u3 := append(u2, newV(1, 7, 0))
+	ur = unionConstraint{
+		rc1,
+		newV(1, 5, 0),
+		newV(1, 7, 0),
+	}
+
+	if actual = u3.Union(rc1); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+	if actual = rc1.Union(u3); !constraintEq(actual, ur) {
+		t.Errorf("Got constraint %q, but expected %q", actual, ur)
+	}
+}
+
+// Most version stuff got tested by range and/or union b/c most tests were
+// repeated bidirectionally (set operations are commutative; testing in pairs
+// helps us catch any situation where we fail to maintain that invariant)
+func TestVersionSetOps(t *testing.T) {
+	var actual Constraint
+
+	v1 := newV(1, 0, 0)
+
+	if actual = v1.Intersect(v1); !constraintEq(actual, v1) {
+		t.Errorf("Version intersected with itself should be itself, got %q", actual)
+	}
+	if !v1.MatchesAny(v1) {
+		t.Errorf("MatchesAny should work with a version against itself")
+	}
+
+	v2 := newV(2, 0, 0)
+	if actual = v1.Intersect(v2); !IsNone(actual) {
+		t.Errorf("Versions should only intersect with themselves, got %q", actual)
+	}
+	if v1.MatchesAny(v2) {
+		t.Errorf("MatchesAny should not work when combined with anything other than itself")
+	}
+
+	result := unionConstraint{v1, v2}
+
+	if actual = v1.Union(v1); !constraintEq(actual, v1) {
+		t.Errorf("Version union with itself should return self, got %q", actual)
+	}
+
+	if actual = v1.Union(v2); !constraintEq(actual, result) {
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+	if actual = v2.Union(v1); !constraintEq(actual, result) {
+		// Operands reversed to make sure union output ordering is normalized
+		t.Errorf("Got constraint %q, but expected %q", actual, result)
+	}
+}
+
+func TestAreAdjacent(t *testing.T) {
+	rc1 := rangeConstraint{
+		min: newV(1, 0, 0),
+		max: newV(2, 0, 0),
+	}
+	rc2 := rangeConstraint{
+		min: newV(1, 2, 0),
+		max: newV(2, 2, 0),
+	}
+
+	if areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges overlap, should not indicate as adjacent")
+	}
+
+	rc2 = rangeConstraint{
+		min: newV(2, 0, 0), // exclusive min == rc1's exclusive max: neither admits 2.0.0, so a gap remains
+	}
+
+	if areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are non-overlapping and non-adjacent, but reported as adjacent")
+	}
+
+	rc2.includeMin = true // exactly one bound inclusive: now truly adjacent
+
+	if !areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent")
+	}
+
+	rc1.includeMax = true // both inclusive: they overlap at the single version 2.0.0
+
+	if areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are overlapping at a single version, but reported as adjacent")
+	}
+
+	rc2.includeMin = false
+	if !areAdjacent(rc1, rc2) {
+		t.Errorf("Ranges are non-overlapping and adjacent, but reported as non-adjacent")
+	}
+}
diff --git a/vendor/github.com/Masterminds/semver/union.go b/vendor/github.com/Masterminds/semver/union.go
new file mode 100644
index 0000000..2659828
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/union.go
@@ -0,0 +1,141 @@
+package semver
+
+import "strings"
+
+type unionConstraint []realConstraint
+
+func (uc unionConstraint) Matches(v *Version) error { // Matches succeeds if any member constraint matches v.
+	var uce MultiMatchFailure
+	for _, c := range uc {
+		if err := c.Matches(v); err == nil {
+			return nil // one satisfied member is enough
+		} else {
+			uce = append(uce, err.(MatchFailure)) // collect every member's failure for the aggregate error
+		}
+	}
+
+	return uce
+}
+
+func (uc unionConstraint) Intersect(c2 Constraint) Constraint {
+	var other []realConstraint
+
+	switch tc2 := c2.(type) {
+	case none:
+		return None() // intersection with the empty set is empty
+	case any:
+		return uc // Any is the identity for intersection
+	case *Version:
+		other = append(other, tc2) // intersect pairwise; a version admitted by no member must yield None, not the version
+	case rangeConstraint:
+		other = append(other, tc2)
+	case unionConstraint:
+		other = tc2 // tc2 already carries the concrete type; no need to re-assert
+	default:
+		panic("unknown type")
+	}
+
+	var newc []Constraint
+	// TODO there's a smarter way to do this than NxN, but...worth it?
+	for _, c := range uc {
+		for _, oc := range other {
+			i := c.Intersect(oc)
+			if !IsNone(i) {
+				newc = append(newc, i)
+			}
+		}
+	}
+
+	return Union(newc...)
+}
+
+func (uc unionConstraint) MatchesAny(c Constraint) bool { // MatchesAny reports whether any member admits a version also admitted by c.
+	for _, ic := range uc {
+		if ic.MatchesAny(c) {
+			return true
+		}
+	}
+	return false
+}
+
+func (uc unionConstraint) Union(c Constraint) Constraint { // Union delegates to the package-level Union constructor.
+	return Union(uc, c)
+}
+
+func (uc unionConstraint) String() string { // String renders the members joined by " || ".
+	var pieces []string
+	for _, c := range uc {
+		pieces = append(pieces, c.String())
+	}
+
+	return strings.Join(pieces, " || ")
+}
+func (unionConstraint) _private() {} // marks unionConstraint as a Constraint
+
+type constraintList []realConstraint // sortable list of constraints; see Less for the ordering
+
+func (cl constraintList) Len() int {
+	return len(cl)
+}
+
+func (cl constraintList) Swap(i, j int) {
+	cl[i], cl[j] = cl[j], cl[i]
+}
+
+func (cl constraintList) Less(i, j int) bool { // Less orders by lower bound; a nil min (unbounded below) sorts first.
+	ic, jc := cl[i], cl[j]
+
+	switch tic := ic.(type) {
+	case *Version:
+		switch tjc := jc.(type) {
+		case *Version:
+			return tic.LessThan(tjc)
+		case rangeConstraint:
+			if tjc.min == nil {
+				return false // unbounded-below range sorts before any version
+			}
+
+			// Because we don't assume stable sort, always put versions ahead of
+			// range mins if they're equal and includeMin is on
+			if tjc.includeMin && tic.Equal(tjc.min) {
+				return false
+			}
+			return tic.LessThan(tjc.min)
+		}
+	case rangeConstraint:
+		switch tjc := jc.(type) {
+		case *Version:
+			if tic.min == nil {
+				return true // unbounded-below range sorts before any version
+			}
+
+			// Because we don't assume stable sort, always put versions ahead of
+			// range mins if they're equal and includeMin is on
+			if tic.includeMin && tjc.Equal(tic.min) {
+				return false
+			}
+			return tic.min.LessThan(tjc)
+		case rangeConstraint:
+			if tic.min == nil {
+				return true
+			}
+			if tjc.min == nil {
+				return false
+			}
+			return tic.min.LessThan(tjc.min)
+		}
+	}
+
+	panic("unreachable") // presumably only *Version and rangeConstraint implement realConstraint -- TODO confirm
+}
+
+func (cl *constraintList) Push(x interface{}) { // Push/Pop give constraintList a heap-style interface -- NOTE(review): confirm intended consumer
+	*cl = append(*cl, x.(realConstraint))
+}
+
+func (cl *constraintList) Pop() interface{} {
+	o := *cl
+	c := o[len(o)-1] // remove and return the last element
+	*cl = o[:len(o)-1]
+	return c
+}
diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go
index dbb93f8..e9261ca 100644
--- a/vendor/github.com/Masterminds/semver/version.go
+++ b/vendor/github.com/Masterminds/semver/version.go
@@ -7,6 +7,7 @@
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 )
 
 // The compiled version of the regex created at init() is cached here so it
@@ -19,6 +20,25 @@
 	ErrInvalidSemVer = errors.New("Invalid Semantic Version")
 )
 
+// Error type; lets us defer string interpolation
+type badVersionSegment struct {
+	e error // underlying strconv parse error
+}
+
+func (b badVersionSegment) Error() string {
+	return fmt.Sprintf("Error parsing version segment: %s", b.e)
+}
+
+// Controls whether or not parsed constraints are cached
+var CacheVersions = true
+var versionCache = make(map[string]vcache)
+var versionCacheLock sync.RWMutex // guards versionCache; plain maps are not safe for concurrent use
+
+type vcache struct {
+	v   *Version // parsed result (nil when err is set)
+	err error    // memoized parse error, if any
+}
+
 // SemVerRegex id the regular expression used to parse a semantic version.
 const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
 	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
@@ -39,8 +59,22 @@
 // NewVersion parses a given version and returns an instance of Version or
 // an error if unable to parse the version.
 func NewVersion(v string) (*Version, error) {
+	if CacheVersions {
+		versionCacheLock.RLock()
+		if sv, exists := versionCache[v]; exists {
+			versionCacheLock.RUnlock()
+			return sv.v, sv.err
+		}
+		versionCacheLock.RUnlock()
+	}
+
 	m := versionRegex.FindStringSubmatch(v)
 	if m == nil {
+		if CacheVersions {
+			versionCacheLock.Lock()
+			versionCache[v] = vcache{err: ErrInvalidSemVer}
+			versionCacheLock.Unlock()
+		}
 		return nil, ErrInvalidSemVer
 	}
 
@@ -53,14 +87,28 @@
 	var temp int64
 	temp, err := strconv.ParseInt(m[1], 10, 32)
 	if err != nil {
-		return nil, fmt.Errorf("Error parsing version segment: %s", err)
+		bvs := badVersionSegment{e: err}
+		if CacheVersions {
+			versionCacheLock.Lock()
+			versionCache[v] = vcache{err: bvs}
+			versionCacheLock.Unlock()
+		}
+
+		return nil, bvs
 	}
 	sv.major = temp
 
 	if m[2] != "" {
 		temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("Error parsing version segment: %s", err)
+			bvs := badVersionSegment{e: err}
+			if CacheVersions {
+				versionCacheLock.Lock()
+				versionCache[v] = vcache{err: bvs}
+				versionCacheLock.Unlock()
+			}
+
+			return nil, bvs
 		}
 		sv.minor = temp
 	} else {
@@ -70,13 +118,26 @@
 	if m[3] != "" {
 		temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 32)
 		if err != nil {
-			return nil, fmt.Errorf("Error parsing version segment: %s", err)
+			bvs := badVersionSegment{e: err}
+			if CacheVersions {
+				versionCacheLock.Lock()
+				versionCache[v] = vcache{err: bvs}
+				versionCacheLock.Unlock()
+			}
+
+			return nil, bvs
 		}
 		sv.patch = temp
 	} else {
 		sv.patch = 0
 	}
 
+	if CacheVersions {
+		versionCacheLock.Lock()
+		versionCache[v] = vcache{v: sv}
+		versionCacheLock.Unlock()
+	}
+
 	return sv, nil
 }
 
@@ -131,11 +192,21 @@
 
 // LessThan tests if one version is less than another one.
 func (v *Version) LessThan(o *Version) bool {
+	// If a nil version was passed, fail and bail out early.
+	if o == nil {
+		return false
+	}
+
 	return v.Compare(o) < 0
 }
 
 // GreaterThan tests if one version is greater than another one.
 func (v *Version) GreaterThan(o *Version) bool {
+	// If a nil version was passed, fail and bail out early.
+	if o == nil {
+		return false
+	}
+
 	return v.Compare(o) > 0
 }
 
@@ -143,6 +214,11 @@
 // Note, versions can be equal with different metadata since metadata
 // is not considered part of the comparable version.
 func (v *Version) Equal(o *Version) bool {
+	// If a nil version was passed, fail and bail out early.
+	if o == nil {
+		return false
+	}
+
 	return v.Compare(o) == 0
 }
 
@@ -181,6 +257,46 @@
 	return comparePrerelease(ps, po)
 }
 
+func (v *Version) Matches(v2 *Version) error {
+	if v.Equal(v2) {
+		return nil
+	}
+
+	return VersionMatchFailure{v: v, other: v2}
+}
+
+func (v *Version) MatchesAny(c Constraint) bool {
+	if v2, ok := c.(*Version); ok {
+		return v.Equal(v2)
+	} else {
+		// The other implementations all have specific handling for this; fall
+		// back on theirs.
+		return c.MatchesAny(v)
+	}
+}
+
+func (v *Version) Intersect(c Constraint) Constraint {
+	if v2, ok := c.(*Version); ok {
+		if v.Equal(v2) {
+			return v
+		}
+		return none{}
+	}
+
+	return c.Intersect(v)
+}
+
+func (v *Version) Union(c Constraint) Constraint {
+	if v2, ok := c.(*Version); ok && v.Equal(v2) {
+		return v
+	} else {
+		return Union(v, c)
+	}
+}
+
+func (Version) _private() {}
+func (Version) _real()    {}
+
 func compareSegment(v, o int64) int {
 	if v < o {
 		return -1
diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore
new file mode 100644
index 0000000..0026861
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml
new file mode 100644
index 0000000..1a0bbea
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/.travis.yml
@@ -0,0 +1,3 @@
+language: go
+go:
+  - tip
diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE
new file mode 100644
index 0000000..a5df10e
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md
new file mode 100644
index 0000000..26f42a2
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/README.md
@@ -0,0 +1,38 @@
+go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix)
+=========
+
+Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+   the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix).
+
+Example
+=======
+
+Below is a simple example of usage
+
+```go
+// Create a tree
+r := radix.New()
+r.Insert("foo", 1)
+r.Insert("bar", 2)
+r.Insert("foobar", 2)
+
+// Find the longest prefix match
+m, _, _ := r.LongestPrefix("foozip")
+if m != "foo" {
+    panic("should be foo")
+}
+```
+
diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go
new file mode 100644
index 0000000..d2914c1
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/radix.go
@@ -0,0 +1,496 @@
+package radix
+
+import (
+	"sort"
+	"strings"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(s string, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+	key string
+	val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+	label byte
+	node  *node
+}
+
+type node struct {
+	// leaf is used to store possible leaf
+	leaf *leafNode
+
+	// prefix is the common prefix we ignore
+	prefix string
+
+	// Edges should be stored in-order for iteration.
+	// We avoid a fully materialized slice to save memory,
+	// since in most cases we expect to be sparse
+	edges edges
+}
+
+func (n *node) isLeaf() bool {
+	return n.leaf != nil
+}
+
+func (n *node) addEdge(e edge) {
+	n.edges = append(n.edges, e)
+	n.edges.Sort()
+}
+
+func (n *node) replaceEdge(e edge) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	if idx < num && n.edges[idx].label == e.label {
+		n.edges[idx].node = e.node
+		return
+	}
+	panic("replacing missing edge")
+}
+
+func (n *node) getEdge(label byte) *node {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		return n.edges[idx].node
+	}
+	return nil
+}
+
+func (n *node) delEdge(label byte) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		copy(n.edges[idx:], n.edges[idx+1:])
+		n.edges[len(n.edges)-1] = edge{}
+		n.edges = n.edges[:len(n.edges)-1]
+	}
+}
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
+
+// Tree implements a radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over
+// a standard hash map is prefix-based lookups and
+// ordered iteration.
+type Tree struct {
+	root *node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	return NewFromMap(nil)
+}
+
+// NewFromMap returns a new tree containing the keys
+// from an existing map
+func NewFromMap(m map[string]interface{}) *Tree {
+	t := &Tree{root: &node{}}
+	for k, v := range m {
+		t.Insert(k, v)
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// Insert is used to add a new entry or update
+// an existing entry. Returns if updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
+	var parent *node
+	n := t.root
+	search := s
+	for {
+		// Handle key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				old := n.leaf.val
+				n.leaf.val = v
+				return old, true
+			}
+
+			n.leaf = &leafNode{
+				key: s,
+				val: v,
+			}
+			t.size++
+			return nil, false
+		}
+
+		// Look for the edge
+		parent = n
+		n = n.getEdge(search[0])
+
+		// No edge, create one
+		if n == nil {
+			e := edge{
+				label: search[0],
+				node: &node{
+					leaf: &leafNode{
+						key: s,
+						val: v,
+					},
+					prefix: search,
+				},
+			}
+			parent.addEdge(e)
+			t.size++
+			return nil, false
+		}
+
+		// Determine longest prefix of the search key on match
+		commonPrefix := longestPrefix(search, n.prefix)
+		if commonPrefix == len(n.prefix) {
+			search = search[commonPrefix:]
+			continue
+		}
+
+		// Split the node
+		t.size++
+		child := &node{
+			prefix: search[:commonPrefix],
+		}
+		parent.replaceEdge(edge{
+			label: search[0],
+			node:  child,
+		})
+
+		// Restore the existing node
+		child.addEdge(edge{
+			label: n.prefix[commonPrefix],
+			node:  n,
+		})
+		n.prefix = n.prefix[commonPrefix:]
+
+		// Create a new leaf node
+		leaf := &leafNode{
+			key: s,
+			val: v,
+		}
+
+		// If the new key is a subset, add it to this node
+		search = search[commonPrefix:]
+		if len(search) == 0 {
+			child.leaf = leaf
+			return nil, false
+		}
+
+		// Create a new edge for the node
+		child.addEdge(edge{
+			label: search[0],
+			node: &node{
+				leaf:   leaf,
+				prefix: search,
+			},
+		})
+		return nil, false
+	}
+}
+
+// Delete is used to delete a key, returning the previous
+// value and if it was deleted
+func (t *Tree) Delete(s string) (interface{}, bool) {
+	var parent *node
+	var label byte
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if !n.isLeaf() {
+				break
+			}
+			goto DELETE
+		}
+
+		// Look for an edge
+		parent = n
+		label = search[0]
+		n = n.getEdge(label)
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+
+DELETE:
+	// Delete the leaf
+	leaf := n.leaf
+	n.leaf = nil
+	t.size--
+
+	// Check if we should delete this node from the parent
+	if parent != nil && len(n.edges) == 0 {
+		parent.delEdge(label)
+	}
+
+	// Check if we should merge this node
+	if n != t.root && len(n.edges) == 1 {
+		n.mergeChild()
+	}
+
+	// Check if we should merge the parent's other child
+	if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+		parent.mergeChild()
+	}
+
+	return leaf.val, true
+}
+
+func (n *node) mergeChild() {
+	e := n.edges[0]
+	child := e.node
+	n.prefix = n.prefix + child.prefix
+	n.leaf = child.leaf
+	n.edges = child.edges
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(s string) (interface{}, bool) {
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
+	var last *leafNode
+	n := t.root
+	search := s
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return "", nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (t *Tree) Minimum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return "", nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (t *Tree) Maximum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		break
+	}
+	return "", nil, false
+}
+
+// Walk is used to walk the tree
+func (t *Tree) Walk(fn WalkFn) {
+	recursiveWalk(t.root, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
+	n := t.root
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if strings.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (t *Tree) WalkPath(path string, fn WalkFn) {
+	n := t.root
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
+
+// ToMap is used to walk the tree and convert it into a map
+func (t *Tree) ToMap() map[string]interface{} {
+	out := make(map[string]interface{}, t.size)
+	t.Walk(func(k string, v interface{}) bool {
+		out[k] = v
+		return false
+	})
+	return out
+}
diff --git a/vendor/github.com/armon/go-radix/radix_test.go b/vendor/github.com/armon/go-radix/radix_test.go
new file mode 100644
index 0000000..300f0d4
--- /dev/null
+++ b/vendor/github.com/armon/go-radix/radix_test.go
@@ -0,0 +1,319 @@
+package radix
+
+import (
+	crand "crypto/rand"
+	"fmt"
+	"reflect"
+	"sort"
+	"testing"
+)
+
+func TestRadix(t *testing.T) {
+	var min, max string
+	inp := make(map[string]interface{})
+	for i := 0; i < 1000; i++ {
+		gen := generateUUID()
+		inp[gen] = i
+		if gen < min || i == 0 {
+			min = gen
+		}
+		if gen > max || i == 0 {
+			max = gen
+		}
+	}
+
+	r := NewFromMap(inp)
+	if r.Len() != len(inp) {
+		t.Fatalf("bad length: %v %v", r.Len(), len(inp))
+	}
+
+	r.Walk(func(k string, v interface{}) bool {
+		println(k)
+		return false
+	})
+
+	for k, v := range inp {
+		out, ok := r.Get(k)
+		if !ok {
+			t.Fatalf("missing key: %v", k)
+		}
+		if out != v {
+			t.Fatalf("value mis-match: %v %v", out, v)
+		}
+	}
+
+	// Check min and max
+	outMin, _, _ := r.Minimum()
+	if outMin != min {
+		t.Fatalf("bad minimum: %v %v", outMin, min)
+	}
+	outMax, _, _ := r.Maximum()
+	if outMax != max {
+		t.Fatalf("bad maximum: %v %v", outMax, max)
+	}
+
+	for k, v := range inp {
+		out, ok := r.Delete(k)
+		if !ok {
+			t.Fatalf("missing key: %v", k)
+		}
+		if out != v {
+			t.Fatalf("value mis-match: %v %v", out, v)
+		}
+	}
+	if r.Len() != 0 {
+		t.Fatalf("bad length: %v", r.Len())
+	}
+}
+
+func TestRoot(t *testing.T) {
+	r := New()
+	_, ok := r.Delete("")
+	if ok {
+		t.Fatalf("bad")
+	}
+	_, ok = r.Insert("", true)
+	if ok {
+		t.Fatalf("bad")
+	}
+	val, ok := r.Get("")
+	if !ok || val != true {
+		t.Fatalf("bad: %v", val)
+	}
+	val, ok = r.Delete("")
+	if !ok || val != true {
+		t.Fatalf("bad: %v", val)
+	}
+}
+
+func TestDelete(t *testing.T) {
+
+	r := New()
+
+	s := []string{"", "A", "AB"}
+
+	for _, ss := range s {
+		r.Insert(ss, true)
+	}
+
+	for _, ss := range s {
+		_, ok := r.Delete(ss)
+		if !ok {
+			t.Fatalf("bad %q", ss)
+		}
+	}
+}
+
+func TestLongestPrefix(t *testing.T) {
+	r := New()
+
+	keys := []string{
+		"",
+		"foo",
+		"foobar",
+		"foobarbaz",
+		"foobarbazzip",
+		"foozip",
+	}
+	for _, k := range keys {
+		r.Insert(k, nil)
+	}
+	if r.Len() != len(keys) {
+		t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+	}
+
+	type exp struct {
+		inp string
+		out string
+	}
+	cases := []exp{
+		{"a", ""},
+		{"abc", ""},
+		{"fo", ""},
+		{"foo", "foo"},
+		{"foob", "foo"},
+		{"foobar", "foobar"},
+		{"foobarba", "foobar"},
+		{"foobarbaz", "foobarbaz"},
+		{"foobarbazzi", "foobarbaz"},
+		{"foobarbazzip", "foobarbazzip"},
+		{"foozi", "foo"},
+		{"foozip", "foozip"},
+		{"foozipzap", "foozip"},
+	}
+	for _, test := range cases {
+		m, _, ok := r.LongestPrefix(test.inp)
+		if !ok {
+			t.Fatalf("no match: %v", test)
+		}
+		if m != test.out {
+			t.Fatalf("mis-match: %v %v", m, test)
+		}
+	}
+}
+
+func TestWalkPrefix(t *testing.T) {
+	r := New()
+
+	keys := []string{
+		"foobar",
+		"foo/bar/baz",
+		"foo/baz/bar",
+		"foo/zip/zap",
+		"zipzap",
+	}
+	for _, k := range keys {
+		r.Insert(k, nil)
+	}
+	if r.Len() != len(keys) {
+		t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+	}
+
+	type exp struct {
+		inp string
+		out []string
+	}
+	cases := []exp{
+		{
+			"f",
+			[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+		},
+		{
+			"foo",
+			[]string{"foobar", "foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+		},
+		{
+			"foob",
+			[]string{"foobar"},
+		},
+		{
+			"foo/",
+			[]string{"foo/bar/baz", "foo/baz/bar", "foo/zip/zap"},
+		},
+		{
+			"foo/b",
+			[]string{"foo/bar/baz", "foo/baz/bar"},
+		},
+		{
+			"foo/ba",
+			[]string{"foo/bar/baz", "foo/baz/bar"},
+		},
+		{
+			"foo/bar",
+			[]string{"foo/bar/baz"},
+		},
+		{
+			"foo/bar/baz",
+			[]string{"foo/bar/baz"},
+		},
+		{
+			"foo/bar/bazoo",
+			[]string{},
+		},
+		{
+			"z",
+			[]string{"zipzap"},
+		},
+	}
+
+	for _, test := range cases {
+		out := []string{}
+		fn := func(s string, v interface{}) bool {
+			out = append(out, s)
+			return false
+		}
+		r.WalkPrefix(test.inp, fn)
+		sort.Strings(out)
+		sort.Strings(test.out)
+		if !reflect.DeepEqual(out, test.out) {
+			t.Fatalf("mis-match: %v %v", out, test.out)
+		}
+	}
+}
+
+func TestWalkPath(t *testing.T) {
+	r := New()
+
+	keys := []string{
+		"foo",
+		"foo/bar",
+		"foo/bar/baz",
+		"foo/baz/bar",
+		"foo/zip/zap",
+		"zipzap",
+	}
+	for _, k := range keys {
+		r.Insert(k, nil)
+	}
+	if r.Len() != len(keys) {
+		t.Fatalf("bad len: %v %v", r.Len(), len(keys))
+	}
+
+	type exp struct {
+		inp string
+		out []string
+	}
+	cases := []exp{
+		{
+			"f",
+			[]string{},
+		},
+		{
+			"foo",
+			[]string{"foo"},
+		},
+		{
+			"foo/",
+			[]string{"foo"},
+		},
+		{
+			"foo/ba",
+			[]string{"foo"},
+		},
+		{
+			"foo/bar",
+			[]string{"foo", "foo/bar"},
+		},
+		{
+			"foo/bar/baz",
+			[]string{"foo", "foo/bar", "foo/bar/baz"},
+		},
+		{
+			"foo/bar/bazoo",
+			[]string{"foo", "foo/bar", "foo/bar/baz"},
+		},
+		{
+			"z",
+			[]string{},
+		},
+	}
+
+	for _, test := range cases {
+		out := []string{}
+		fn := func(s string, v interface{}) bool {
+			out = append(out, s)
+			return false
+		}
+		r.WalkPath(test.inp, fn)
+		sort.Strings(out)
+		sort.Strings(test.out)
+		if !reflect.DeepEqual(out, test.out) {
+			t.Fatalf("mis-match: %v %v", out, test.out)
+		}
+	}
+}
+
+// generateUUID is used to generate a random UUID
+func generateUUID() string {
+	buf := make([]byte, 16)
+	if _, err := crand.Read(buf); err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+		buf[0:4],
+		buf[4:6],
+		buf[6:8],
+		buf[8:10],
+		buf[10:16])
+}
diff --git a/vendor/github.com/sdboyer/gps/.gitignore b/vendor/github.com/sdboyer/gps/.gitignore
new file mode 100644
index 0000000..22d0d82
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/.gitignore
@@ -0,0 +1 @@
+vendor
diff --git a/vendor/github.com/sdboyer/gps/CODE_OF_CONDUCT.md b/vendor/github.com/sdboyer/gps/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..660ee84
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/CODE_OF_CONDUCT.md
@@ -0,0 +1,74 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+  advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+  address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at sam (at) samboyer.org. All complaints
+will be reviewed and investigated and will result in a response that is deemed
+necessary and appropriate to the circumstances. The project team is obligated to
+maintain confidentiality with regard to the reporter of an incident. Further
+details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/sdboyer/gps/CONTRIBUTING.md b/vendor/github.com/sdboyer/gps/CONTRIBUTING.md
new file mode 100644
index 0000000..3ff03b3
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# Contributing to `gps`
+
+:+1::tada: First, we're thrilled you're thinking about contributing! :tada::+1:
+
+As a library trying to cover all the bases in Go package management, it's
+crucial that we incorporate a broad range of experiences and use cases. There is
+a strong, motivating design behind `gps`, but we are always open to discussion
+on ways we can improve the library, particularly if it allows `gps` to cover
+more of the Go package management possibility space.
+
+`gps` has no CLA, but we do have a [Code of Conduct](https://github.com/sdboyer/gps/blob/master/CODE_OF_CONDUCT.md). By
+participating, you are expected to uphold this code.
+
+## How can I contribute?
+
+It may be best to start by getting a handle on what `gps` actually is. Our
+wiki has a [general introduction](https://github.com/sdboyer/gps/wiki/Introduction-to-gps), a
+[guide for tool implementors](https://github.com/sdboyer/gps/wiki/gps-for-Implementors), and
+a [guide for contributors](https://github.com/sdboyer/gps/wiki/gps-for-contributors).
+There's also a [discursive essay](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527)
+that lays out the big-picture goals and considerations driving the `gps` design.
+
+There are a number of ways to contribute, all highly valuable and deeply
+appreciated:
+
+* **Helping "translate" existing issues:** as `gps` exits its larval stage, it still
+  has a number of issues that may be incomprehensible to everyone except
+  @sdboyer. Simply asking clarifying questions on these issues is helpful!
+* **Identifying missed use cases:** the loose `gps` rule of thumb is, "if you can do
+  it in Go, we support it in `gps`." Posting issues about cases we've missed
+  helps us reach that goal.
+* **Writing tests:** in the same vein, `gps` has a [large suite](https://github.com/sdboyer/gps/blob/master/CODE_OF_CONDUCT.md) of solving tests, but
+  they still only scratch the surface. Writing tests is not only helpful, but is
+  also a great way to get a feel for how `gps` works.
+* **Suggesting enhancements:** `gps` has plenty of missing chunks. Help fill them in!
+* **Reporting bugs**: `gps` being a library means this isn't always the easiest.
+  However, you could always compile the [example](https://github.com/sdboyer/gps/blob/master/example.go), run that against some of
+  your projects, and report problems you encounter.
+* **Building experimental tools with `gps`:** probably the best and fastest ways to
+  kick the tires!
+
+`gps` is still beta-ish software. There are plenty of bugs to squash! APIs are
+stabilizing, but are still subject to change.
+
+## Issues and Pull Requests
+
+Pull requests are the preferred way to submit changes to 'gps'. Unless the
+changes are quite small, pull requests should generally reference an
+already-opened issue. Make sure to explain clearly in the body of the PR what
+the reasoning behind the change is.
+
+The changes themselves should generally conform to the following guidelines:
+
+* Git commit messages should be [well-written](http://chris.beams.io/posts/git-commit/#seven-rules).
+* Code should be `gofmt`-ed.
+* New or changed logic should be accompanied by tests.
+* Maintainable, table-based tests are strongly preferred, even if it means
+  writing a new testing harness to execute them.
diff --git a/vendor/github.com/sdboyer/gps/LICENSE b/vendor/github.com/sdboyer/gps/LICENSE
new file mode 100644
index 0000000..d4a1dcc
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Sam Boyer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/sdboyer/gps/README.md b/vendor/github.com/sdboyer/gps/README.md
new file mode 100644
index 0000000..0cb902b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/README.md
@@ -0,0 +1,114 @@
+<p align="center">
+<img 
+    src="header.png"
+    width="800" height="255" border="0" alt="gps">
+<br>
+<a href="https://circleci.com/gh/sdboyer/gps"><img src="https://circleci.com/gh/sdboyer/gps.svg?style=shield" alt="Build Status"></a>
+<a href="https://ci.appveyor.com/project/sdboyer/gps"><img src="https://ci.appveyor.com/api/projects/status/github/sdboyer/gps?svg=true&branch=master&passingText=Windows%20-%20OK&failingText=Windows%20-%20failed&pendingText=Windows%20-%20pending" alt="Windows Build Status"></a>
+<a href="https://goreportcard.com/report/github.com/sdboyer/gps"><img src="https://goreportcard.com/badge/github.com/sdboyer/gps" alt="Build Status"></a>
+<a href="https://codecov.io/gh/sdboyer/gps"><img src="https://codecov.io/gh/sdboyer/gps/branch/master/graph/badge.svg" alt="Codecov" /></a>
+<a href="https://godoc.org/github.com/sdboyer/gps"><img src="https://godoc.org/github.com/sdboyer/gps?status.svg" alt="GoDoc"></a>
+</p>
+
+--
+
+`gps` is the Go Packaging Solver. It is an engine for tackling dependency
+management problems in Go. It is trivial - [about 35 lines of
+code](https://github.com/sdboyer/gps/blob/master/example.go) - to replicate the
+fetching bits of `go get` using `gps`.
+
+`gps` is _not_ Yet Another Go Package Management Tool. Rather, it's a library
+that package management (and adjacent) tools can use to solve the
+[hard](https://en.wikipedia.org/wiki/Boolean_satisfiability_problem) parts of
+the problem in a consistent,
+[holistic](https://medium.com/@sdboyer/so-you-want-to-write-a-package-manager-4ae9c17d9527)
+way. It is a distillation of the ideas behind language package managers like
+[bundler](http://bundler.io), [npm](https://www.npmjs.com/),
+[elm-package](https://github.com/elm-lang/elm-package),
+[cargo](https://crates.io/) (and others) into a library, artisanally
+handcrafted with ❤️ for Go's specific requirements.
+
+`gps` is [on track](https://github.com/Masterminds/glide/pull/384) to become the engine behind [glide](https://glide.sh).
+
+The wiki has a [general introduction to the `gps`
+approach](https://github.com/sdboyer/gps/wiki/Introduction-to-gps), as well
+as guides for folks [implementing
+tools](https://github.com/sdboyer/gps/wiki/gps-for-Implementors) or [looking
+to contribute](https://github.com/sdboyer/gps/wiki/gps-for-Contributors).
+
+**`gps` is progressing rapidly, but still in beta, with a concomitantly liberal sprinkling of panics.**
+
+## Wait...a package management _library_?!
+
+Yup. See [the rationale](https://github.com/sdboyer/gps/wiki/Rationale).
+
+## Features
+
+A feature list for a package management library is a bit different than one for
+a package management tool. Instead of listing the things an end-user can do,
+we list the choices a tool *can* make and offer, in some form, to its users, as
+well as the non-choices/assumptions/constraints that `gps` imposes on a tool.
+
+### Non-Choices
+
+We'd love for `gps`'s non-choices to be noncontroversial. But that's not always
+the case.
+
+Nevertheless, these non-choices remain because, taken as a whole, they make
+experiments and discussion around Go package management coherent and
+productive.
+
+* Go >=1.6, or 1.5 with `GO15VENDOREXPERIMENT = 1` set
+* Everything under `vendor/` is volatile and controlled solely by the tool
+* A central cache of repositories is used (cannot be `GOPATH`)
+* A [**project**](https://godoc.org/github.com/sdboyer/gps#ProjectRoot) concept:
+  a tree of packages, all covered by one `vendor` directory
+* A [**manifest** and
+  **lock**](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#manifests-and-locks)
+  approach to tracking version and constraint information
+* Upstream sources are one of `git`, `bzr`, `hg` or `svn` repositories
+* What the available versions are for a given project/repository (all branches, tags, or revs are eligible)
+  * In general, semver tags are preferred to branches, are preferred to plain tags
+* The actual packages that must be present (determined through import graph static analysis)
+  * How the import graph is statically analyzed (Similar to `go/build`, but with a combinatorial view of build tags)
+* All packages from the same source (repository) must be the same version
+* Package import cycles are not allowed ([not yet implemented](https://github.com/sdboyer/gps/issues/66))
+
+There are also some current non-choices that we would like to push into the realm of choice:
+
+* Importable projects that are not bound to the repository root
+* Source inference around different import path patterns (e.g., how `github.com/*` or `my_company/*` are handled)
+
+### Choices
+
+These choices represent many of the ways that `gps`-based tools could
+substantively differ from each other.
+
+Some of these are choices designed to encompass all options for topics on which
+reasonable people have disagreed. Others are simply important controls that no
+general library could know _a priori_.
+
+* How to store manifest and lock information (file(s)? a db?)
+* Which of the other package managers to interoperate with
+* Which types of version constraints to allow the user to specify (e.g., allowing [semver ranges](https://docs.npmjs.com/misc/semver) or not)
+* Whether or not to strip nested `vendor` directories
+* Which packages in the import graph to [ignore](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#ignoring-packages) (if any)
+* What constraint [overrides](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#overrides) to apply (if any)
+* What [informational output](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#trace-and-tracelogger) to show the end user
+* What dependency version constraints are declared by the [root project](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#manifest-data)
+* What dependency version constraints are declared by [all dependencies](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#the-projectanalyzer)
+* Given a [previous solution](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#lock-data), [which versions to let change, and how](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#tochange-changeall-and-downgrade)
+  * In the absence of a previous solution, whether or not to use [preferred versions](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#preferred-versions)
+* Allowing, or not, the user to [swap in different network names](https://github.com/sdboyer/gps/wiki/gps-for-Implementors#projectidentifier) for import paths (e.g. forks)
+* Specifying additional input/source packages not reachable from the root import graph ([not complete](https://github.com/sdboyer/gps/issues/42))
+
+This list may not be exhaustive - see the
+[implementor's guide](https://github.com/sdboyer/gps/wiki/gps-for-Implementors)
+for a proper treatment.
+
+## Contributing
+
+Yay, contributing! Please see
+[CONTRIBUTING.md](https://github.com/sdboyer/gps/blob/master/CONTRIBUTING.md).
+Note that `gps` also abides by a [Code of
+Conduct](https://github.com/sdboyer/gps/blob/master/CODE_OF_CONDUCT.md), and is MIT-licensed.
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/a.go
new file mode 100644
index 0000000..e4e2ced
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	S = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/.m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/a.go
new file mode 100644
index 0000000..59d2f72
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/a.go
@@ -0,0 +1,14 @@
+package disallow
+
+import (
+	"sort"
+	"disallow/testdata"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+	_ = testdata.H
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/disallow/testdata/another.go b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/testdata/another.go
new file mode 100644
index 0000000..6defdae
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/disallow/testdata/another.go
@@ -0,0 +1,7 @@
+package testdata
+
+import "hash"
+
+var (
+	H = hash.Hash
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/a.go
new file mode 100644
index 0000000..04cac6a
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/a.go
@@ -0,0 +1,12 @@
+package base
+
+import (
+	"go/parser"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = parser.ParseFile
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/nm.go b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/nm.go
new file mode 100644
index 0000000..44a0abb
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/doublenest/namemismatch/nm.go
@@ -0,0 +1,12 @@
+package nm
+
+import (
+	"os"
+
+	"github.com/Masterminds/semver"
+)
+
+var (
+	V = os.FileInfo
+	_ = semver.Constraint
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/empty/.gitkeep b/vendor/github.com/sdboyer/gps/_testdata/src/empty/.gitkeep
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/empty/.gitkeep
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmain/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmain/igmain.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/igmain.go
new file mode 100644
index 0000000..52129ef
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmain/igmain.go
@@ -0,0 +1,7 @@
+// +build ignore
+
+package main
+
+import "unicode"
+
+var _ = unicode.In
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/igmain.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/igmain.go
new file mode 100644
index 0000000..efee3f9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmainlong/igmain.go
@@ -0,0 +1,9 @@
+// Another comment, which the parser should ignore and still see build tags
+
+// +build ignore
+
+package main
+
+import "unicode"
+
+var _ = unicode.In
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/igmain.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/igmain.go
new file mode 100644
index 0000000..52129ef
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/igmain.go
@@ -0,0 +1,7 @@
+// +build ignore
+
+package main
+
+import "unicode"
+
+var _ = unicode.In
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/igmaint/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/missing/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/missing/a.go
new file mode 100644
index 0000000..8522bdd
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/missing/a.go
@@ -0,0 +1,14 @@
+package simple
+
+import (
+	"sort"
+
+	"missing/missing"
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+	_ = missing.Foo
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/missing/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/nest/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/nest/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/nest/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/nest/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/a.go
new file mode 100644
index 0000000..ec1f9b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/ren/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/ren/simple/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/ren/simple/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/ren/simple/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simple/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simple/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simple/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a_test.go
new file mode 100644
index 0000000..72a3014
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/a_test.go
@@ -0,0 +1,11 @@
+package simple_test
+
+import (
+	"sort"
+	"strconv"
+)
+
+var (
+	_ = sort.Strings
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simpleallt/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplet/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplet/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplet/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a_test.go
new file mode 100644
index 0000000..72a3014
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/simplext/a_test.go
@@ -0,0 +1,11 @@
+package simple_test
+
+import (
+	"sort"
+	"strconv"
+)
+
+var (
+	_ = sort.Strings
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/t/t_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/t/t_test.go
new file mode 100644
index 0000000..ff4f77b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/t/t_test.go
@@ -0,0 +1,11 @@
+package simple
+
+import (
+	"math/rand"
+	"strconv"
+)
+
+var (
+	_ = rand.Int()
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/a.go
new file mode 100644
index 0000000..300b730
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/a.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/twopkgs/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/locals.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/locals.go
new file mode 100644
index 0000000..5c7e6c7
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/locals.go
@@ -0,0 +1,13 @@
+package main
+
+import (
+	"varied/namemismatch"
+	"varied/otherpath"
+	"varied/simple"
+)
+
+var (
+	_ = simple.S
+	_ = nm.V
+	_ = otherpath.O
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/a.go
new file mode 100644
index 0000000..65fd7ca
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/a.go
@@ -0,0 +1,12 @@
+package m1p
+
+import (
+	"sort"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	M = sort.Strings
+	_ = gps.Solve
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/b.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/b.go
new file mode 100644
index 0000000..83674b9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/m1p/b.go
@@ -0,0 +1,11 @@
+package m1p
+
+import (
+	"os"
+	"sort"
+)
+
+var (
+	_ = sort.Strings
+	_ = os.PathSeparator
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/main.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/main.go
new file mode 100644
index 0000000..92c3dc1
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/main.go
@@ -0,0 +1,9 @@
+package main
+
+import (
+	"net/http"
+)
+
+var (
+	_ = http.Client
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/namemismatch/nm.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/namemismatch/nm.go
new file mode 100644
index 0000000..44a0abb
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/namemismatch/nm.go
@@ -0,0 +1,12 @@
+package nm
+
+import (
+	"os"
+
+	"github.com/Masterminds/semver"
+)
+
+var (
+	V = os.FileInfo
+	_ = semver.Constraint
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/otherpath/otherpath_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/otherpath/otherpath_test.go
new file mode 100644
index 0000000..73891e6
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/otherpath/otherpath_test.go
@@ -0,0 +1,5 @@
+package otherpath
+
+import "varied/m1p"
+
+var O = m1p.M
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another.go
new file mode 100644
index 0000000..85368da
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another.go
@@ -0,0 +1,7 @@
+package another
+
+import "hash"
+
+var (
+	H = hash.Hash
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another_test.go
new file mode 100644
index 0000000..72a89ad
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/another_test.go
@@ -0,0 +1,7 @@
+package another
+
+import "encoding/binary"
+
+var (
+	_ = binary.PutVarint
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/locals.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/locals.go
new file mode 100644
index 0000000..d8d0316
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/another/locals.go
@@ -0,0 +1,5 @@
+package another
+
+import "varied/m1p"
+
+var _ = m1p.M
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/locals.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/locals.go
new file mode 100644
index 0000000..6ebb90f
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/locals.go
@@ -0,0 +1,7 @@
+package simple
+
+import "varied/simple/another"
+
+var (
+	_ = another.H
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/simple.go b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/simple.go
new file mode 100644
index 0000000..c8fbb05
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/varied/simple/simple.go
@@ -0,0 +1,12 @@
+package simple
+
+import (
+	"go/parser"
+
+	"github.com/sdboyer/gps"
+)
+
+var (
+	_ = parser.ParseFile
+	S = gps.Prepare
+)
diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/xt/a_test.go b/vendor/github.com/sdboyer/gps/_testdata/src/xt/a_test.go
new file mode 100644
index 0000000..72a3014
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/_testdata/src/xt/a_test.go
@@ -0,0 +1,11 @@
+package simple_test
+
+import (
+	"sort"
+	"strconv"
+)
+
+var (
+	_ = sort.Strings
+	_ = strconv.Unquote
+)
diff --git a/vendor/github.com/sdboyer/gps/analysis.go b/vendor/github.com/sdboyer/gps/analysis.go
new file mode 100644
index 0000000..d410eb3
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/analysis.go
@@ -0,0 +1,906 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"go/build"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"text/scanner"
+)
+
+var osList []string
+var archList []string
+var stdlib = make(map[string]bool)
+
+const stdlibPkgs string = "archive archive/tar archive/zip bufio builtin bytes compress compress/bzip2 compress/flate compress/gzip compress/lzw compress/zlib container container/heap container/list container/ring context crypto crypto/aes crypto/cipher crypto/des crypto/dsa crypto/ecdsa crypto/elliptic crypto/hmac crypto/md5 crypto/rand crypto/rc4 crypto/rsa crypto/sha1 crypto/sha256 crypto/sha512 crypto/subtle crypto/tls crypto/x509 crypto/x509/pkix database database/sql database/sql/driver debug debug/dwarf debug/elf debug/gosym debug/macho debug/pe debug/plan9obj encoding encoding/ascii85 encoding/asn1 encoding/base32 encoding/base64 encoding/binary encoding/csv encoding/gob encoding/hex encoding/json encoding/pem encoding/xml errors expvar flag fmt go go/ast go/build go/constant go/doc go/format go/importer go/parser go/printer go/scanner go/token go/types hash hash/adler32 hash/crc32 hash/crc64 hash/fnv html html/template image image/color image/color/palette image/draw image/gif image/jpeg image/png index index/suffixarray io io/ioutil log log/syslog math math/big math/cmplx math/rand mime mime/multipart mime/quotedprintable net net/http net/http/cgi net/http/cookiejar net/http/fcgi net/http/httptest net/http/httputil net/http/pprof net/mail net/rpc net/rpc/jsonrpc net/smtp net/textproto net/url os os/exec os/signal os/user path path/filepath reflect regexp regexp/syntax runtime runtime/cgo runtime/debug runtime/msan runtime/pprof runtime/race runtime/trace sort strconv strings sync sync/atomic syscall testing testing/iotest testing/quick text text/scanner text/tabwriter text/template text/template/parse time unicode unicode/utf16 unicode/utf8 unsafe"
+
+// Before appengine moved to google.golang.org/appengine, it had a magic
+// stdlib-like import path. We have to ignore all of these.
+const appenginePkgs string = "appengine/aetest appengine/blobstore appengine/capability appengine/channel appengine/cloudsql appengine/cmd appengine/cmd/aebundler appengine/cmd/aedeploy appengine/cmd/aefix appengine/datastore appengine/delay appengine/demos appengine/demos/guestbook appengine/demos/guestbook/templates appengine/demos/helloworld appengine/file appengine/image appengine/internal appengine/internal/aetesting appengine/internal/app_identity appengine/internal/base appengine/internal/blobstore appengine/internal/capability appengine/internal/channel appengine/internal/datastore appengine/internal/image appengine/internal/log appengine/internal/mail appengine/internal/memcache appengine/internal/modules appengine/internal/remote_api appengine/internal/search appengine/internal/socket appengine/internal/system appengine/internal/taskqueue appengine/internal/urlfetch appengine/internal/user appengine/internal/xmpp appengine/log appengine/mail appengine/memcache appengine/module appengine/remote_api appengine/runtime appengine/search appengine/socket appengine/taskqueue appengine/urlfetch appengine/user appengine/xmpp"
+
+func init() {
+	// The supported systems are listed in
+	// https://github.com/golang/go/blob/master/src/go/build/syslist.go
+	// The lists are not exported so we need to duplicate them here.
+	osListString := "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows"
+	osList = strings.Split(osListString, " ")
+
+	archListString := "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64"
+	archList = strings.Split(archListString, " ")
+
+	for _, pkg := range strings.Split(stdlibPkgs, " ") {
+		stdlib[pkg] = true
+	}
+	for _, pkg := range strings.Split(appenginePkgs, " ") {
+		stdlib[pkg] = true
+	}
+
+	// Also ignore C
+	// TODO(sdboyer) actually figure out how to deal with cgo
+	stdlib["C"] = true
+}
+
+// ListPackages lists info for all packages at or below the provided fileRoot.
+//
+// Directories without any valid Go files are excluded. Directories with
+// multiple packages are excluded.
+//
+// The importRoot parameter is prepended to the relative path when determining
+// the import path for each package. The obvious case is for something typical,
+// like:
+//
+//  fileRoot = "/home/user/go/src/github.com/foo/bar"
+//  importRoot = "github.com/foo/bar"
+//
+// where the fileRoot and importRoot align. However, if you provide:
+//
+//  fileRoot = "/home/user/workspace/path/to/repo"
+//  importRoot = "github.com/foo/bar"
+//
+// then the root package at path/to/repo will be ascribed import path
+// "github.com/foo/bar", and its subpackage "baz" will be
+// "github.com/foo/bar/baz".
+//
+// A PackageTree is returned, which contains the ImportRoot and map of import path
+// to PackageOrErr - each path under the root that exists will have either a
+// Package, or an error describing why the directory is not a valid package.
+func ListPackages(fileRoot, importRoot string) (PackageTree, error) {
+	// Set up a build.ctx for parsing
+	ctx := build.Default
+	ctx.GOROOT = ""
+	ctx.GOPATH = ""
+	ctx.UseAllFiles = true
+
+	ptree := PackageTree{
+		ImportRoot: importRoot,
+		Packages:   make(map[string]PackageOrErr),
+	}
+
+	// mkfilter returns two funcs that can be injected into a build.Context,
+	// letting us filter the results into an "in" and "out" set.
+	mkfilter := func(files map[string]struct{}) (in, out func(dir string) (fi []os.FileInfo, err error)) {
+		in = func(dir string) (fi []os.FileInfo, err error) {
+			all, err := ioutil.ReadDir(dir)
+			if err != nil {
+				return nil, err
+			}
+
+			for _, f := range all {
+				if _, exists := files[f.Name()]; exists {
+					fi = append(fi, f)
+				}
+			}
+			return fi, nil
+		}
+
+		out = func(dir string) (fi []os.FileInfo, err error) {
+			all, err := ioutil.ReadDir(dir)
+			if err != nil {
+				return nil, err
+			}
+
+			for _, f := range all {
+				if _, exists := files[f.Name()]; !exists {
+					fi = append(fi, f)
+				}
+			}
+			return fi, nil
+		}
+
+		return
+	}
+
+	// helper func to create a Package from a *build.Package
+	happy := func(importPath string, p *build.Package) Package {
+		// Happy path - simple parsing worked
+		pkg := Package{
+			ImportPath:  importPath,
+			CommentPath: p.ImportComment,
+			Name:        p.Name,
+			Imports:     p.Imports,
+			TestImports: dedupeStrings(p.TestImports, p.XTestImports),
+		}
+
+		return pkg
+	}
+
+	err := filepath.Walk(fileRoot, func(path string, fi os.FileInfo, err error) error {
+		if err != nil && err != filepath.SkipDir {
+			return err
+		}
+		if !fi.IsDir() {
+			return nil
+		}
+
+		// Skip dirs that are known to hold non-local/dependency code.
+		//
+		// We don't skip _*, or testdata dirs because, while it may be poor
+		// form, importing them is not a compilation error.
+		switch fi.Name() {
+		case "vendor", "Godeps":
+			return filepath.SkipDir
+		}
+		// We do skip dot-dirs, though, because it's such a ubiquitous standard
+		// that they not be visited by normal commands, and because things get
+		// really weird if we don't.
+		if strings.HasPrefix(fi.Name(), ".") {
+			return filepath.SkipDir
+		}
+
+		// Compute the import path. Run the result through ToSlash(), so that windows
+		// paths are normalized to Unix separators, as import paths are expected
+		// to be.
+		ip := filepath.ToSlash(filepath.Join(importRoot, strings.TrimPrefix(path, fileRoot)))
+
+		// Find all the imports, across all os/arch combos
+		p, err := ctx.ImportDir(path, analysisImportMode())
+		var pkg Package
+		if err == nil {
+			pkg = happy(ip, p)
+		} else {
+			switch terr := err.(type) {
+			case *build.NoGoError:
+				ptree.Packages[ip] = PackageOrErr{
+					Err: err,
+				}
+				return nil
+			case *build.MultiplePackageError:
+				// Set this up preemptively, so we can easily just return out if
+				// something goes wrong. Otherwise, it'll get transparently
+				// overwritten later.
+				ptree.Packages[ip] = PackageOrErr{
+					Err: err,
+				}
+
+				// For now, we're punting entirely on dealing with os/arch
+				// combinations. That will be a more significant refactor.
+				//
+				// However, there is one case we want to allow here - one or
+				// more files with "+build ignore" with package `main`. (Ignore
+				// is just a convention, but for now it's good enough to just
+				// check that.) This is a fairly common way to give examples,
+				// and to make a more sophisticated build system than a Makefile
+				// allows, so we want to support that case. So, transparently
+				// lump the deps together.
+				mains := make(map[string]struct{})
+				for k, pkgname := range terr.Packages {
+					if pkgname == "main" {
+						tags, err2 := readFileBuildTags(filepath.Join(path, terr.Files[k]))
+						if err2 != nil {
+							return nil
+						}
+
+						var hasignore bool
+						for _, t := range tags {
+							if t == "ignore" {
+								hasignore = true
+								break
+							}
+						}
+						if !hasignore {
+							// No ignore tag found - bail out
+							return nil
+						}
+						mains[terr.Files[k]] = struct{}{}
+					}
+				}
+				// Make filtering funcs that will let us look only at the main
+				// files, and exclude the main files; inf and outf, respectively
+				inf, outf := mkfilter(mains)
+
+				// outf first; if there's another err there, we bail out with a
+				// return
+				ctx.ReadDir = outf
+				po, err2 := ctx.ImportDir(path, analysisImportMode())
+				if err2 != nil {
+					return nil
+				}
+				ctx.ReadDir = inf
+				pi, err2 := ctx.ImportDir(path, analysisImportMode())
+				if err2 != nil {
+					return nil
+				}
+				ctx.ReadDir = nil
+
+				// Use the other files as baseline, they're the main stuff
+				pkg = happy(ip, po)
+				mpkg := happy(ip, pi)
+				pkg.Imports = dedupeStrings(pkg.Imports, mpkg.Imports)
+				pkg.TestImports = dedupeStrings(pkg.TestImports, mpkg.TestImports)
+			default:
+				return err
+			}
+		}
+
+		// This area has some...fuzzy rules, but check all the imports for
+		// local/relative/dot-ness, and record an error for the package if we
+		// see any.
+		var lim []string
+		for _, imp := range append(pkg.Imports, pkg.TestImports...) {
+			switch {
+			// Do allow the single-dot, at least for now
+			case imp == "..":
+				lim = append(lim, imp)
+				// ignore stdlib done this way, b/c that's what the go tooling does
+			case strings.HasPrefix(imp, "./"):
+				if stdlib[imp[2:]] {
+					lim = append(lim, imp)
+				}
+			case strings.HasPrefix(imp, "../"):
+				if stdlib[imp[3:]] {
+					lim = append(lim, imp)
+				}
+			}
+		}
+
+		if len(lim) > 0 {
+			ptree.Packages[ip] = PackageOrErr{
+				Err: &LocalImportsError{
+					Dir:          ip,
+					LocalImports: lim,
+				},
+			}
+		} else {
+			ptree.Packages[ip] = PackageOrErr{
+				P: pkg,
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return PackageTree{}, err
+	}
+
+	return ptree, nil
+}
+
+// LocalImportsError indicates that a package contains at least one relative
+// import that will prevent it from compiling.
+//
+// TODO(sdboyer) add a Files property once we're doing our own per-file parsing
+type LocalImportsError struct {
+	Dir          string
+	LocalImports []string
+}
+
+func (e *LocalImportsError) Error() string {
+	return fmt.Sprintf("import path %s had problematic local imports", e.Dir)
+}
+
+func readFileBuildTags(fp string) ([]string, error) {
+	co, err := readGoContents(fp)
+	if err != nil {
+		return []string{}, err
+	}
+
+	var tags []string
+	// Only look at places where we had a code comment.
+	if len(co) > 0 {
+		t := findTags(co)
+		for _, tg := range t {
+			found := false
+			for _, tt := range tags {
+				if tt == tg {
+					found = true
+				}
+			}
+			if !found {
+				tags = append(tags, tg)
+			}
+		}
+	}
+
+	return tags, nil
+}
+
+// readGoContents reads the contents of a Go file up to the package
+// declaration. This can be used to find the build tags.
+func readGoContents(fp string) ([]byte, error) {
+	f, err := os.Open(fp)
+	defer f.Close()
+	if err != nil {
+		return []byte{}, err
+	}
+
+	var s scanner.Scanner
+	s.Init(f)
+	var tok rune
+	var pos scanner.Position
+	for tok != scanner.EOF {
+		tok = s.Scan()
+
+		// Getting the token text will skip comments by default.
+		tt := s.TokenText()
+		// build tags will not be after the package declaration.
+		if tt == "package" {
+			pos = s.Position
+			break
+		}
+	}
+
+	var buf bytes.Buffer
+	f.Seek(0, 0)
+	_, err = io.CopyN(&buf, f, int64(pos.Offset))
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// From a byte slice of a Go file find the tags.
+func findTags(co []byte) []string {
+	p := co
+	var tgs []string
+	for len(p) > 0 {
+		line := p
+		if i := bytes.IndexByte(line, '\n'); i >= 0 {
+			line, p = line[:i], p[i+1:]
+		} else {
+			p = p[len(p):]
+		}
+		line = bytes.TrimSpace(line)
+		// Only look at comment lines that are well formed in the Go style
+		if bytes.HasPrefix(line, []byte("//")) {
+			line = bytes.TrimSpace(line[len([]byte("//")):])
+			if len(line) > 0 && line[0] == '+' {
+				f := strings.Fields(string(line))
+
+				// We've found a +build tag line.
+				if f[0] == "+build" {
+					for _, tg := range f[1:] {
+						tgs = append(tgs, tg)
+					}
+				}
+			}
+		}
+	}
+
+	return tgs
+}
+
+// A PackageTree represents the results of recursively parsing a tree of
+// packages, starting at the ImportRoot. The results of parsing the files in the
+// directory identified by each import path - a Package or an error - are stored
+// in the Packages map, keyed by that import path.
+type PackageTree struct {
+	ImportRoot string
+	Packages   map[string]PackageOrErr
+}
+
+// dup copies the PackageTree.
+//
+// This is really only useful as a defensive measure to prevent external state
+// mutations.
+func (t PackageTree) dup() PackageTree {
+	t2 := PackageTree{
+		ImportRoot: t.ImportRoot,
+		Packages:   map[string]PackageOrErr{},
+	}
+
+	for path, poe := range t.Packages {
+		poe2 := PackageOrErr{
+			Err: poe.Err,
+			P:   poe.P,
+		}
+		if len(poe.P.Imports) > 0 {
+			poe2.P.Imports = make([]string, len(poe.P.Imports))
+			copy(poe2.P.Imports, poe.P.Imports)
+		}
+		if len(poe.P.TestImports) > 0 {
+			poe2.P.TestImports = make([]string, len(poe.P.TestImports))
+			copy(poe2.P.TestImports, poe.P.TestImports)
+		}
+
+		t2.Packages[path] = poe2
+	}
+
+	return t2
+}
+
+type wm struct {
+	err error
+	ex  map[string]bool
+	in  map[string]bool
+}
+
+// PackageOrErr stores the results of attempting to parse a single directory for
+// Go source code.
+type PackageOrErr struct {
+	P   Package
+	Err error
+}
+
+// ReachMap maps a set of import paths (keys) to the set of external packages
+// transitively reachable from the packages at those import paths.
+//
+// See PackageTree.ExternalReach() for more information.
+type ReachMap map[string][]string
+
+// ExternalReach looks through a PackageTree and computes the list of external
+// import statements (that is, import statements pointing to packages that are
+// not logical children of PackageTree.ImportRoot) that are transitively
+// imported by the internal packages in the tree.
+//
+// main indicates whether (true) or not (false) to include main packages in the
+// analysis. When utilized by gps' solver, main packages are generally excluded
+// from analyzing anything other than the root project, as they necessarily can't
+// be imported.
+//
+// tests indicates whether (true) or not (false) to include imports from test
+// files in packages when computing the reach map.
+//
+// ignore is a map of import paths that, if encountered, should be excluded from
+// analysis. This exclusion applies to both internal and external packages. If
+// an external import path is ignored, it is simply omitted from the results.
+//
+// If an internal path is ignored, then not only does it not appear in the final
+// map, but it is also excluded from the transitive calculations of other
+// internal packages.  That is, if you ignore A/foo, then the external package
+// list for all internal packages that import A/foo will not include external
+// packages that are only reachable through A/foo.
+//
+// Visually, this means that, given a PackageTree with root A and packages at A,
+// A/foo, and A/bar, and the following import chain:
+//
+//  A -> A/foo -> A/bar -> B/baz
+//
+// In this configuration, all of A's packages transitively import B/baz, so the
+// returned map would be:
+//
+//  map[string][]string{
+// 	"A": []string{"B/baz"},
+// 	"A/foo": []string{"B/baz"}
+// 	"A/bar": []string{"B/baz"},
+//  }
+//
+// However, if you ignore A/foo, then A's path to B/baz is broken, and A/foo is
+// omitted entirely. Thus, the returned map would be:
+//
+//  map[string][]string{
+// 	"A": []string{},
+// 	"A/bar": []string{"B/baz"},
+//  }
+//
+// If there are no packages to ignore, it is safe to pass a nil map.
+func (t PackageTree) ExternalReach(main, tests bool, ignore map[string]bool) ReachMap {
+	if ignore == nil {
+		ignore = make(map[string]bool)
+	}
+
+	// world's simplest adjacency list
+	workmap := make(map[string]wm)
+
+	var imps []string
+	for ip, perr := range t.Packages {
+		if perr.Err != nil {
+			workmap[ip] = wm{
+				err: perr.Err,
+			}
+			continue
+		}
+		p := perr.P
+
+		// Skip main packages, unless param says otherwise
+		if p.Name == "main" && !main {
+			continue
+		}
+		// Skip ignored packages
+		if ignore[ip] {
+			continue
+		}
+
+		imps = imps[:0]
+		imps = p.Imports
+		if tests {
+			imps = dedupeStrings(imps, p.TestImports)
+		}
+
+		w := wm{
+			ex: make(map[string]bool),
+			in: make(map[string]bool),
+		}
+
+		for _, imp := range imps {
+			// Skip ignored imports
+			if ignore[imp] {
+				continue
+			}
+
+			if !checkPrefixSlash(filepath.Clean(imp), t.ImportRoot) {
+				w.ex[imp] = true
+			} else {
+				if w2, seen := workmap[imp]; seen {
+					for i := range w2.ex {
+						w.ex[i] = true
+					}
+					for i := range w2.in {
+						w.in[i] = true
+					}
+				} else {
+					w.in[imp] = true
+				}
+			}
+		}
+
+		workmap[ip] = w
+	}
+
+	//return wmToReach(workmap, t.ImportRoot)
+	return wmToReach(workmap, "") // TODO(sdboyer) this passes tests, but doesn't seem right
+}
+
+// wmToReach takes an internal "workmap" constructed by
+// PackageTree.ExternalReach(), transitively walks (via depth-first traversal)
+// all internal imports until they reach an external path or terminate, then
+// translates the results into a slice of external imports for each internal
+// pkg.
+//
+// The basedir string, with a trailing slash ensured, will be stripped from the
+// keys of the returned map.
+//
+// This is mostly separated out for testing purposes.
+func wmToReach(workmap map[string]wm, basedir string) map[string][]string {
+	// Uses depth-first exploration to compute reachability into external
+	// packages, dropping any internal packages on "poisoned paths" - a path
+	// containing a package with an error, or with a dep on an internal package
+	// that's missing.
+
+	const (
+		white uint8 = iota
+		grey
+		black
+	)
+
+	colors := make(map[string]uint8)
+	allreachsets := make(map[string]map[string]struct{})
+
+	// poison is a helper func to eliminate specific reachsets from allreachsets
+	poison := func(path []string) {
+		for _, ppkg := range path {
+			delete(allreachsets, ppkg)
+		}
+	}
+
+	var dfe func(string, []string) bool
+
+	// dfe is the depth-first-explorer that computes safe, error-free external
+	// reach map.
+	//
+	// pkg is the import path of the pkg currently being visited; path is the
+	// stack of parent packages we've visited to get to pkg. The return value
+	// indicates whether the level completed successfully (true) or if it was
+	// poisoned (false).
+	//
+	// TODO(sdboyer) some deft improvements could probably be made by passing the list of
+	// parent reachsets, rather than a list of parent package string names.
+	// might be able to eliminate the use of allreachsets map-of-maps entirely.
+	dfe = func(pkg string, path []string) bool {
+		// white is the zero value of uint8, which is what we want if the pkg
+		// isn't in the colors map, so this works fine
+		switch colors[pkg] {
+		case white:
+			// first visit to this pkg; mark it as in-process (grey)
+			colors[pkg] = grey
+
+			// make sure it's present and w/out errs
+			w, exists := workmap[pkg]
+			if !exists || w.err != nil {
+				// Does not exist or has an err; poison self and all parents
+				poison(path)
+
+				// we know we're done here, so mark it black
+				colors[pkg] = black
+				return false
+			}
+			// pkg exists with no errs. mark it as in-process (grey), and start
+			// a reachmap for it
+			//
+			// TODO(sdboyer) use sync.Pool here? can be lots of explicit map alloc/dealloc
+			rs := make(map[string]struct{})
+
+			// Push self onto the path slice. Passing this as a value has the
+			// effect of auto-popping the slice, while also giving us safe
+			// memory reuse.
+			path = append(path, pkg)
+
+			// Dump this package's external pkgs into its own reachset. Separate
+			// loop from the parent dump to avoid nested map loop lookups.
+			for ex := range w.ex {
+				rs[ex] = struct{}{}
+			}
+			allreachsets[pkg] = rs
+
+			// Push this pkg's external imports into all parent reachsets. Not
+			// all parents will necessarily have a reachset; none, some, or all
+			// could have been poisoned by a different path than what we're on
+			// right now. (Or we could be at depth 0)
+			for _, ppkg := range path {
+				if prs, exists := allreachsets[ppkg]; exists {
+					for ex := range w.ex {
+						prs[ex] = struct{}{}
+					}
+				}
+			}
+
+			// Now, recurse until done, or a false bubbles up, indicating the
+			// path is poisoned.
+			var clean bool
+			for in := range w.in {
+				// It's possible, albeit weird, for a package to import itself.
+				// If we try to visit self, though, then it erroneously poisons
+				// the path, as it would be interpreted as grey. In reality,
+				// this becomes a no-op, so just skip it.
+				if in == pkg {
+					continue
+				}
+
+				clean = dfe(in, path)
+				if !clean {
+					// Path is poisoned. Our reachmap was already deleted by the
+					// path we're returning from; mark ourselves black, then
+					// bubble up the poison. This is OK to do early, before
+					// exploring all internal imports, because the outer loop
+					// visits all internal packages anyway.
+					//
+					// In fact, stopping early is preferable - white subpackages
+					// won't have to iterate pointlessly through a parent path
+					// with no reachset.
+					colors[pkg] = black
+					return false
+				}
+			}
+
+			// Fully done with this pkg; no transitive problems.
+			colors[pkg] = black
+			return true
+
+		case grey:
+			// grey means an import cycle; guaranteed badness right here. You'd
+			// hope we never encounter it in a dependency (really? you published
+			// that code?), but we have to defend against it.
+			//
+			// FIXME handle import cycles by dropping everything involved. (i
+			// think we need to compute SCC, then drop *all* of them?)
+			colors[pkg] = black
+			poison(append(path, pkg)) // poison self and parents
+
+		case black:
+			// black means we're done with the package. If it has an entry in
+			// allreachsets, it completed successfully. If not, it was poisoned,
+			// and we need to bubble the poison back up.
+			rs, exists := allreachsets[pkg]
+			if !exists {
+				// just poison parents; self was necessarily already poisoned
+				poison(path)
+				return false
+			}
+
+			// It's good; pull over the external imports from its reachset
+			// into all non-poisoned parent reachsets
+			for _, ppkg := range path {
+				if prs, exists := allreachsets[ppkg]; exists {
+					for ex := range rs {
+						prs[ex] = struct{}{}
+					}
+				}
+			}
+			return true
+
+		default:
+			panic(fmt.Sprintf("invalid color marker %v for %s", colors[pkg], pkg))
+		}
+
+		// shouldn't ever hit this
+		return false
+	}
+
+	// Run the depth-first exploration.
+	//
+	// Don't bother computing graph sources, this straightforward loop works
+	// comparably well, and fits nicely with an escape hatch in the dfe.
+	var path []string
+	for pkg := range workmap {
+		dfe(pkg, path)
+	}
+
+	if len(allreachsets) == 0 {
+		return nil
+	}
+
+	// Flatten allreachsets into the final reachlist
+	rt := strings.TrimSuffix(basedir, string(os.PathSeparator)) + string(os.PathSeparator)
+	rm := make(map[string][]string)
+	for pkg, rs := range allreachsets {
+		rlen := len(rs)
+		if rlen == 0 {
+			rm[strings.TrimPrefix(pkg, rt)] = nil
+			continue
+		}
+
+		edeps := make([]string, rlen)
+		k := 0
+		for opkg := range rs {
+			edeps[k] = opkg
+			k++
+		}
+
+		sort.Strings(edeps)
+		rm[strings.TrimPrefix(pkg, rt)] = edeps
+	}
+
+	return rm
+}
+
+// ListExternalImports computes a sorted, deduplicated list of all the external
+// packages that are reachable through imports from all valid packages in a
+// ReachMap, as computed by PackageTree.ExternalReach().
+//
+// main and tests determine whether main packages and test imports should be
+// included in the calculation. "External" is defined as anything not prefixed,
+// after path cleaning, by the PackageTree.ImportRoot. This includes stdlib.
+//
+// If an internal path is ignored, all of the external packages that it uniquely
+// imports are omitted. Note, however, that no internal transitivity checks are
+// made here - every non-ignored package in the tree is considered independently
+// (with one set of exceptions, noted below). That means, given a PackageTree
+// with root A and packages at A, A/foo, and A/bar, and the following import
+// chain:
+//
+//  A -> A/foo -> A/bar -> B/baz
+//
+// If you ignore A or A/foo, A/bar will still be visited, and B/baz will be
+// returned, because this method visits ALL packages in the tree, not only those reachable
+// from the root (or any other) packages. If your use case requires interrogating
+// external imports with respect to only specific package entry points, you need
+// ExternalReach() instead.
+//
+// It is safe to pass a nil map if there are no packages to ignore.
+//
+// If an internal package has an error (that is, PackageOrErr is Err), it is excluded from
+// consideration. Internal packages that transitively import the error package
+// are also excluded. So, if:
+//
+//    -> B/foo
+//   /
+//  A
+//   \
+//    -> A/bar -> B/baz
+//
+// And A/bar has some error in it, then both A and A/bar will be eliminated from
+// consideration; neither B/foo nor B/baz will be in the results. If A/bar, with
+// its errors, is ignored, however, then A will remain, and B/foo will be in the
+// results.
+//
+// Finally, note that if a directory is named "testdata", or has a leading dot
+// or underscore, it will not be directly analyzed as a source. This is in
+// keeping with Go tooling conventions that such directories should be ignored.
+// So, if:
+//
+//  A -> B/foo
+//  A/.bar -> B/baz
+//  A/_qux -> B/baz
+//  A/testdata -> B/baz
+//
+// Then B/foo will be returned, but B/baz will not, because all three of the
+// packages that import it are in directories with disallowed names.
+//
+// HOWEVER, in keeping with the Go compiler, if one of those packages in a
+// disallowed directory is imported by a package in an allowed directory, then
+// it *will* be used. That is, while tools like go list will ignore a directory
+// named .foo, you can still import from .foo. Thus, it must be included. So,
+// if:
+//
+//    -> B/foo
+//   /
+//  A
+//   \
+//    -> A/.bar -> B/baz
+//
+// A is legal, and it imports A/.bar, so the results will include B/baz.
+func (rm ReachMap) ListExternalImports() []string {
+	exm := make(map[string]struct{})
+	for pkg, reach := range rm {
+		// Eliminate import paths with any elements having leading dots, leading
+		// underscores, or testdata. If these are internally reachable (which is
+		// a no-no, but possible), any external imports will have already been
+		// pulled up through ExternalReach. The key here is that we don't want
+		// to treat such packages as themselves being sources.
+		//
+		// TODO(sdboyer) strings.Split will always heap alloc, which isn't great to do
+		// in a loop like this. We could also just parse it ourselves...
+		var skip bool
+		for _, elem := range strings.Split(pkg, "/") {
+			if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" {
+				skip = true
+				break
+			}
+		}
+
+		if !skip {
+			for _, ex := range reach {
+				exm[ex] = struct{}{}
+			}
+		}
+	}
+
+	if len(exm) == 0 {
+		return nil
+	}
+
+	ex := make([]string, len(exm))
+	k := 0
+	for p := range exm {
+		ex[k] = p
+		k++
+	}
+
+	sort.Strings(ex)
+	return ex
+}
+
+// checkPrefixSlash checks to see if the prefix is a prefix of the string as-is,
+// and that it is either equal OR the prefix + / is still a prefix.
+func checkPrefixSlash(s, prefix string) bool {
+	if !strings.HasPrefix(s, prefix) {
+		return false
+	}
+	return s == prefix || strings.HasPrefix(s, ensureTrailingSlash(prefix))
+}
+
+func ensureTrailingSlash(s string) string {
+	return strings.TrimSuffix(s, string(os.PathSeparator)) + string(os.PathSeparator)
+}
+
+// helper func to merge, dedupe, and sort strings
+func dedupeStrings(s1, s2 []string) (r []string) {
+	dedupe := make(map[string]bool)
+
+	if len(s1) > 0 && len(s2) > 0 {
+		for _, i := range s1 {
+			dedupe[i] = true
+		}
+		for _, i := range s2 {
+			dedupe[i] = true
+		}
+
+		for i := range dedupe {
+			r = append(r, i)
+		}
+		// And then re-sort them
+		sort.Strings(r)
+	} else if len(s1) > 0 {
+		r = s1
+	} else if len(s2) > 0 {
+		r = s2
+	}
+
+	return
+}
diff --git a/vendor/github.com/sdboyer/gps/analysis_test.go b/vendor/github.com/sdboyer/gps/analysis_test.go
new file mode 100644
index 0000000..c21f53b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/analysis_test.go
@@ -0,0 +1,1262 @@
+package gps
+
+import (
+	"fmt"
+	"go/build"
+	"os"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+// PackageTree.ExternalReach() uses an easily separable algorithm, wmToReach(),
+// to turn a discovered set of packages and their imports into a proper external
+// reach map.
+//
+// That algorithm is purely symbolic (no filesystem interaction), and thus is
+// easy to test. This is that test.
+func TestWorkmapToReach(t *testing.T) {
+	empty := func() map[string]bool {
+		return make(map[string]bool)
+	}
+
+	table := map[string]struct {
+		workmap map[string]wm
+		basedir string
+		out     map[string][]string
+	}{
+		"single": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo": nil,
+			},
+		},
+		"no external": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: empty(),
+				},
+				"foo/bar": {
+					ex: empty(),
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo":     nil,
+				"foo/bar": nil,
+			},
+		},
+		"no external with subpkg": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: map[string]bool{
+						"foo/bar": true,
+					},
+				},
+				"foo/bar": {
+					ex: empty(),
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo":     nil,
+				"foo/bar": nil,
+			},
+		},
+		"simple base transitive": {
+			workmap: map[string]wm{
+				"foo": {
+					ex: empty(),
+					in: map[string]bool{
+						"foo/bar": true,
+					},
+				},
+				"foo/bar": {
+					ex: map[string]bool{
+						"baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"foo": {
+					"baz",
+				},
+				"foo/bar": {
+					"baz",
+				},
+			},
+		},
+		"missing package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo": true, // missing
+						"A/bar": true,
+					},
+				},
+				"A/bar": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/bar": {
+					"B/baz",
+				},
+			},
+		},
+		"transitive missing package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo":  true, // transitively missing
+						"A/quux": true,
+					},
+				},
+				"A/foo": {
+					ex: map[string]bool{
+						"C/flugle": true,
+					},
+					in: map[string]bool{
+						"A/bar": true, // missing
+					},
+				},
+				"A/quux": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/quux": {
+					"B/baz",
+				},
+			},
+		},
+		"err'd package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo": true, // err'd
+						"A/bar": true,
+					},
+				},
+				"A/foo": {
+					err: fmt.Errorf("err pkg"),
+				},
+				"A/bar": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/bar": {
+					"B/baz",
+				},
+			},
+		},
+		"transitive err'd package is poison": {
+			workmap: map[string]wm{
+				"A": {
+					ex: map[string]bool{
+						"B/foo": true,
+					},
+					in: map[string]bool{
+						"A/foo":  true, // transitively err'd
+						"A/quux": true,
+					},
+				},
+				"A/foo": {
+					ex: map[string]bool{
+						"C/flugle": true,
+					},
+					in: map[string]bool{
+						"A/bar": true, // err'd
+					},
+				},
+				"A/bar": {
+					err: fmt.Errorf("err pkg"),
+				},
+				"A/quux": {
+					ex: map[string]bool{
+						"B/baz": true,
+					},
+					in: empty(),
+				},
+			},
+			out: map[string][]string{
+				"A/quux": {
+					"B/baz",
+				},
+			},
+		},
+	}
+
+	for name, fix := range table {
+		out := wmToReach(fix.workmap, fix.basedir)
+		if !reflect.DeepEqual(out, fix.out) {
+			t.Errorf("wmToReach(%q): Did not get expected reach map:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
+		}
+	}
+}
+
+func TestListPackages(t *testing.T) {
+	srcdir := filepath.Join(getwd(t), "_testdata", "src")
+	j := func(s string) string {
+		return filepath.Join(srcdir, s)
+	}
+
+	table := map[string]struct {
+		fileRoot   string
+		importRoot string
+		out        PackageTree
+		err        error
+	}{
+		"empty": {
+			fileRoot:   j("empty"),
+			importRoot: "empty",
+			out: PackageTree{
+				ImportRoot: "empty",
+				Packages: map[string]PackageOrErr{
+					"empty": {
+						Err: &build.NoGoError{
+							Dir: j("empty"),
+						},
+					},
+				},
+			},
+			err: nil,
+		},
+		"code only": {
+			fileRoot:   j("simple"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"impose import path": {
+			fileRoot:   j("simple"),
+			importRoot: "arbitrary",
+			out: PackageTree{
+				ImportRoot: "arbitrary",
+				Packages: map[string]PackageOrErr{
+					"arbitrary": {
+						P: Package{
+							ImportPath:  "arbitrary",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"test only": {
+			fileRoot:   j("t"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports:     []string{},
+							TestImports: []string{
+								"math/rand",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"xtest only": {
+			fileRoot:   j("xt"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports:     []string{},
+							TestImports: []string{
+								"sort",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and test": {
+			fileRoot:   j("simplet"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+							TestImports: []string{
+								"math/rand",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and xtest": {
+			fileRoot:   j("simplext"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+							TestImports: []string{
+								"sort",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code, test, xtest": {
+			fileRoot:   j("simpleallt"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+							TestImports: []string{
+								"math/rand",
+								"sort",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"one pkg multifile": {
+			fileRoot:   j("m1p"),
+			importRoot: "m1p",
+			out: PackageTree{
+				ImportRoot: "m1p",
+				Packages: map[string]PackageOrErr{
+					"m1p": {
+						P: Package{
+							ImportPath:  "m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"one nested below": {
+			fileRoot:   j("nest"),
+			importRoot: "nest",
+			out: PackageTree{
+				ImportRoot: "nest",
+				Packages: map[string]PackageOrErr{
+					"nest": {
+						P: Package{
+							ImportPath:  "nest",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+					"nest/m1p": {
+						P: Package{
+							ImportPath:  "nest/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"two nested under empty root": {
+			fileRoot:   j("ren"),
+			importRoot: "ren",
+			out: PackageTree{
+				ImportRoot: "ren",
+				Packages: map[string]PackageOrErr{
+					"ren": {
+						Err: &build.NoGoError{
+							Dir: j("ren"),
+						},
+					},
+					"ren/m1p": {
+						P: Package{
+							ImportPath:  "ren/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+					"ren/simple": {
+						P: Package{
+							ImportPath:  "ren/simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"internal name mismatch": {
+			fileRoot:   j("doublenest"),
+			importRoot: "doublenest",
+			out: PackageTree{
+				ImportRoot: "doublenest",
+				Packages: map[string]PackageOrErr{
+					"doublenest": {
+						P: Package{
+							ImportPath:  "doublenest",
+							CommentPath: "",
+							Name:        "base",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"go/parser",
+							},
+						},
+					},
+					"doublenest/namemismatch": {
+						P: Package{
+							ImportPath:  "doublenest/namemismatch",
+							CommentPath: "",
+							Name:        "nm",
+							Imports: []string{
+								"github.com/Masterminds/semver",
+								"os",
+							},
+						},
+					},
+					"doublenest/namemismatch/m1p": {
+						P: Package{
+							ImportPath:  "doublenest/namemismatch/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"file and importroot mismatch": {
+			fileRoot:   j("doublenest"),
+			importRoot: "other",
+			out: PackageTree{
+				ImportRoot: "other",
+				Packages: map[string]PackageOrErr{
+					"other": {
+						P: Package{
+							ImportPath:  "other",
+							CommentPath: "",
+							Name:        "base",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"go/parser",
+							},
+						},
+					},
+					"other/namemismatch": {
+						P: Package{
+							ImportPath:  "other/namemismatch",
+							CommentPath: "",
+							Name:        "nm",
+							Imports: []string{
+								"github.com/Masterminds/semver",
+								"os",
+							},
+						},
+					},
+					"other/namemismatch/m1p": {
+						P: Package{
+							ImportPath:  "other/namemismatch/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and ignored main": {
+			fileRoot:   j("igmain"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+								"unicode",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code and ignored main with comment leader": {
+			fileRoot:   j("igmainlong"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+								"unicode",
+							},
+						},
+					},
+				},
+			},
+		},
+		"code, tests, and ignored main": {
+			fileRoot:   j("igmaint"),
+			importRoot: "simple",
+			out: PackageTree{
+				ImportRoot: "simple",
+				Packages: map[string]PackageOrErr{
+					"simple": {
+						P: Package{
+							ImportPath:  "simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"sort",
+								"unicode",
+							},
+							TestImports: []string{
+								"math/rand",
+								"strconv",
+							},
+						},
+					},
+				},
+			},
+		},
+		"two pkgs": {
+			fileRoot:   j("twopkgs"),
+			importRoot: "twopkgs",
+			out: PackageTree{
+				ImportRoot: "twopkgs",
+				Packages: map[string]PackageOrErr{
+					"twopkgs": {
+						Err: &build.MultiplePackageError{
+							Dir:      j("twopkgs"),
+							Packages: []string{"simple", "m1p"},
+							Files:    []string{"a.go", "b.go"},
+						},
+					},
+				},
+			},
+		},
+		// imports a missing pkg
+		"missing import": {
+			fileRoot:   j("missing"),
+			importRoot: "missing",
+			out: PackageTree{
+				ImportRoot: "missing",
+				Packages: map[string]PackageOrErr{
+					"missing": {
+						P: Package{
+							ImportPath:  "missing",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"missing/missing",
+								"sort",
+							},
+						},
+					},
+					"missing/m1p": {
+						P: Package{
+							ImportPath:  "missing/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+		// has disallowed dir names
+		"disallowed dirs": {
+			fileRoot:   j("disallow"),
+			importRoot: "disallow",
+			out: PackageTree{
+				ImportRoot: "disallow",
+				Packages: map[string]PackageOrErr{
+					"disallow": {
+						P: Package{
+							ImportPath:  "disallow",
+							CommentPath: "",
+							Name:        "disallow",
+							Imports: []string{
+								"disallow/testdata",
+								"github.com/sdboyer/gps",
+								"sort",
+							},
+						},
+					},
+					// disallow/.m1p is ignored by listPackages...for now. Kept
+					// here commented because this might change again...
+					//"disallow/.m1p": {
+					//P: Package{
+					//ImportPath:  "disallow/.m1p",
+					//CommentPath: "",
+					//Name:        "m1p",
+					//Imports: []string{
+					//"github.com/sdboyer/gps",
+					//"os",
+					//"sort",
+					//},
+					//},
+					//},
+					"disallow/testdata": {
+						P: Package{
+							ImportPath:  "disallow/testdata",
+							CommentPath: "",
+							Name:        "testdata",
+							Imports: []string{
+								"hash",
+							},
+						},
+					},
+				},
+			},
+		},
+		// This case mostly exists for the PackageTree methods, but it does
+		// cover a bit of range
+		"varied": {
+			fileRoot:   j("varied"),
+			importRoot: "varied",
+			out: PackageTree{
+				ImportRoot: "varied",
+				Packages: map[string]PackageOrErr{
+					"varied": {
+						P: Package{
+							ImportPath:  "varied",
+							CommentPath: "",
+							Name:        "main",
+							Imports: []string{
+								"net/http",
+								"varied/namemismatch",
+								"varied/otherpath",
+								"varied/simple",
+							},
+						},
+					},
+					"varied/otherpath": {
+						P: Package{
+							ImportPath:  "varied/otherpath",
+							CommentPath: "",
+							Name:        "otherpath",
+							Imports:     []string{},
+							TestImports: []string{
+								"varied/m1p",
+							},
+						},
+					},
+					"varied/simple": {
+						P: Package{
+							ImportPath:  "varied/simple",
+							CommentPath: "",
+							Name:        "simple",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"go/parser",
+								"varied/simple/another",
+							},
+						},
+					},
+					"varied/simple/another": {
+						P: Package{
+							ImportPath:  "varied/simple/another",
+							CommentPath: "",
+							Name:        "another",
+							Imports: []string{
+								"hash",
+								"varied/m1p",
+							},
+							TestImports: []string{
+								"encoding/binary",
+							},
+						},
+					},
+					"varied/namemismatch": {
+						P: Package{
+							ImportPath:  "varied/namemismatch",
+							CommentPath: "",
+							Name:        "nm",
+							Imports: []string{
+								"github.com/Masterminds/semver",
+								"os",
+							},
+						},
+					},
+					"varied/m1p": {
+						P: Package{
+							ImportPath:  "varied/m1p",
+							CommentPath: "",
+							Name:        "m1p",
+							Imports: []string{
+								"github.com/sdboyer/gps",
+								"os",
+								"sort",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	for name, fix := range table {
+		if _, err := os.Stat(fix.fileRoot); err != nil {
+			t.Errorf("listPackages(%q): error on fileRoot %s: %s", name, fix.fileRoot, err)
+			continue
+		}
+
+		out, err := ListPackages(fix.fileRoot, fix.importRoot)
+
+		if err != nil && fix.err == nil {
+			t.Errorf("listPackages(%q): Received error but none expected: %s", name, err)
+		} else if fix.err != nil && err == nil {
+			t.Errorf("listPackages(%q): Error expected but none received", name)
+		} else if fix.err != nil && err != nil {
+			if !reflect.DeepEqual(fix.err, err) {
+				t.Errorf("listPackages(%q): Did not receive expected error:\n\t(GOT): %s\n\t(WNT): %s", name, err, fix.err)
+			}
+		}
+
+		if fix.out.ImportRoot != "" && fix.out.Packages != nil {
+			if !reflect.DeepEqual(out, fix.out) {
+				if fix.out.ImportRoot != out.ImportRoot {
+					t.Errorf("listPackages(%q): Expected ImportRoot %s, got %s", name, fix.out.ImportRoot, out.ImportRoot)
+				}
+
+				// overwrite the out one to see if we still have a real problem
+				out.ImportRoot = fix.out.ImportRoot
+
+				if !reflect.DeepEqual(out, fix.out) {
+					if len(fix.out.Packages) < 2 {
+						t.Errorf("listPackages(%q): Did not get expected PackageOrErrs:\n\t(GOT): %s\n\t(WNT): %s", name, out, fix.out)
+					} else {
+						seen := make(map[string]bool)
+						for path, perr := range fix.out.Packages {
+							seen[path] = true
+							if operr, exists := out.Packages[path]; !exists {
+								t.Errorf("listPackages(%q): Expected PackageOrErr for path %s was missing from output:\n\t%s", name, path, perr)
+							} else {
+								if !reflect.DeepEqual(perr, operr) {
+									t.Errorf("listPackages(%q): PkgOrErr for path %s was not as expected:\n\t(GOT): %s\n\t(WNT): %s", name, path, operr, perr)
+								}
+							}
+						}
+
+						for path, operr := range out.Packages {
+							if seen[path] {
+								continue
+							}
+
+							t.Errorf("listPackages(%q): Got PackageOrErr for path %s, but none was expected:\n\t%s", name, path, operr)
+						}
+					}
+				}
+			}
+		}
+	}
+}
+
+func TestListExternalImports(t *testing.T) {
+	// There's enough in the 'varied' test case to test most of what matters
+	vptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
+	if err != nil {
+		t.Fatalf("listPackages failed on varied test case: %s", err)
+	}
+
+	var expect []string
+	var name string
+	var ignore map[string]bool
+	var main, tests bool
+
+	validate := func() {
+		result := vptree.ExternalReach(main, tests, ignore).ListExternalImports()
+		if !reflect.DeepEqual(expect, result) {
+			t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
+		}
+	}
+
+	all := []string{
+		"encoding/binary",
+		"github.com/Masterminds/semver",
+		"github.com/sdboyer/gps",
+		"go/parser",
+		"hash",
+		"net/http",
+		"os",
+		"sort",
+	}
+
+	// helper to rewrite expect, except for a couple packages
+	//
+	// this makes it easier to see what we're taking out on each test
+	except := func(not ...string) {
+		expect = make([]string, len(all)-len(not))
+
+		drop := make(map[string]bool)
+		for _, npath := range not {
+			drop[npath] = true
+		}
+
+		k := 0
+		for _, path := range all {
+			if !drop[path] {
+				expect[k] = path
+				k++
+			}
+		}
+	}
+
+	// everything on
+	name = "simple"
+	except()
+	main, tests = true, true
+	validate()
+
+	// Now without tests, which should just cut one
+	name = "no tests"
+	tests = false
+	except("encoding/binary")
+	validate()
+
+	// Now skip main, which still just cuts out one
+	name = "no main"
+	main, tests = false, true
+	except("net/http")
+	validate()
+
+	// No test and no main, which should be additive
+	name = "no test, no main"
+	main, tests = false, false
+	except("net/http", "encoding/binary")
+	validate()
+
+	// now, the ignore tests. turn main and tests back on
+	main, tests = true, true
+
+	// start with non-matching
+	name = "non-matching ignore"
+	ignore = map[string]bool{
+		"nomatch": true,
+	}
+	except()
+	validate()
+
+	// should have the same effect as ignoring main
+	name = "ignore the root"
+	ignore = map[string]bool{
+		"varied": true,
+	}
+	except("net/http")
+	validate()
+
+	// now drop a more interesting one
+	name = "ignore simple"
+	ignore = map[string]bool{
+		"varied/simple": true,
+	}
+	// we get github.com/sdboyer/gps from m1p, too, so it should still be there
+	except("go/parser")
+	validate()
+
+	// now drop two
+	name = "ignore simple and namemismatch"
+	ignore = map[string]bool{
+		"varied/simple":       true,
+		"varied/namemismatch": true,
+	}
+	except("go/parser", "github.com/Masterminds/semver")
+	validate()
+
+	// make sure tests and main play nice with ignore
+	name = "ignore simple and namemismatch, and no tests"
+	tests = false
+	except("go/parser", "github.com/Masterminds/semver", "encoding/binary")
+	validate()
+	name = "ignore simple and namemismatch, and no main"
+	main, tests = false, true
+	except("go/parser", "github.com/Masterminds/semver", "net/http")
+	validate()
+	name = "ignore simple and namemismatch, and no main or tests"
+	main, tests = false, false
+	except("go/parser", "github.com/Masterminds/semver", "net/http", "encoding/binary")
+	validate()
+
+	main, tests = true, true
+
+	// ignore two that should knock out gps
+	name = "ignore both importers"
+	ignore = map[string]bool{
+		"varied/simple": true,
+		"varied/m1p":    true,
+	}
+	except("sort", "github.com/sdboyer/gps", "go/parser")
+	validate()
+
+	// finally, directly ignore some external packages
+	name = "ignore external"
+	ignore = map[string]bool{
+		"github.com/sdboyer/gps": true,
+		"go/parser":              true,
+		"sort":                   true,
+	}
+	except("sort", "github.com/sdboyer/gps", "go/parser")
+	validate()
+
+	// The only thing varied *doesn't* cover is disallowed path patterns
+	ptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "disallow"), "disallow")
+	if err != nil {
+		t.Fatalf("listPackages failed on disallow test case: %s", err)
+	}
+
+	result := ptree.ExternalReach(false, false, nil).ListExternalImports()
+	expect = []string{"github.com/sdboyer/gps", "hash", "sort"}
+	if !reflect.DeepEqual(expect, result) {
+		t.Errorf("Wrong imports in %q case:\n\t(GOT): %s\n\t(WNT): %s", name, result, expect)
+	}
+}
+
+func TestExternalReach(t *testing.T) {
+	// There's enough in the 'varied' test case to test most of what matters
+	vptree, err := ListPackages(filepath.Join(getwd(t), "_testdata", "src", "varied"), "varied")
+	if err != nil {
+		t.Fatalf("listPackages failed on varied test case: %s", err)
+	}
+
+	// Set up vars for validate closure
+	var expect map[string][]string
+	var name string
+	var main, tests bool
+	var ignore map[string]bool
+
+	validate := func() {
+		result := vptree.ExternalReach(main, tests, ignore)
+		if !reflect.DeepEqual(expect, result) {
+			seen := make(map[string]bool)
+			for ip, epkgs := range expect {
+				seen[ip] = true
+				if pkgs, exists := result[ip]; !exists {
+					t.Errorf("ver(%q): expected import path %s was not present in result", name, ip)
+				} else {
+					if !reflect.DeepEqual(pkgs, epkgs) {
+						t.Errorf("ver(%q): did not get expected package set for import path %s:\n\t(GOT): %s\n\t(WNT): %s", name, ip, pkgs, epkgs)
+					}
+				}
+			}
+
+			for ip, pkgs := range result {
+				if seen[ip] {
+					continue
+				}
+				t.Errorf("ver(%q): Got packages for import path %s, but none were expected:\n\t%s", name, ip, pkgs)
+			}
+		}
+	}
+
+	all := map[string][]string{
+		"varied":                {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/gps", "go/parser", "hash", "net/http", "os", "sort"},
+		"varied/m1p":            {"github.com/sdboyer/gps", "os", "sort"},
+		"varied/namemismatch":   {"github.com/Masterminds/semver", "os"},
+		"varied/otherpath":      {"github.com/sdboyer/gps", "os", "sort"},
+		"varied/simple":         {"encoding/binary", "github.com/sdboyer/gps", "go/parser", "hash", "os", "sort"},
+		"varied/simple/another": {"encoding/binary", "github.com/sdboyer/gps", "hash", "os", "sort"},
+	}
+	// build a map to validate the exception inputs. do this because shit is
+	// hard enough to keep track of that it's preferable not to have silent
+	// success if a typo creeps in and we're trying to except an import that
+	// isn't in a pkg in the first place
+	valid := make(map[string]map[string]bool)
+	for ip, expkgs := range all {
+		m := make(map[string]bool)
+		for _, pkg := range expkgs {
+			m[pkg] = true
+		}
+		valid[ip] = m
+	}
+
+	// helper to compose expect, excepting specific packages
+	//
+	// this makes it easier to see what we're taking out on each test
+	except := func(pkgig ...string) {
+		// reinit expect with everything from all
+		expect = make(map[string][]string)
+		for ip, expkgs := range all {
+			sl := make([]string, len(expkgs))
+			copy(sl, expkgs)
+			expect[ip] = sl
+		}
+
+		// now build the dropmap
+		drop := make(map[string]map[string]bool)
+		for _, igstr := range pkgig {
+			// split on space; first elem is import path to pkg, the rest are
+			// the imports to drop.
+			not := strings.Split(igstr, " ")
+			var ip string
+			ip, not = not[0], not[1:]
+			if _, exists := valid[ip]; !exists {
+				t.Fatalf("%s is not a package name we're working with, doofus", ip)
+			}
+
+			// if only a single elem was passed, though, drop the whole thing
+			if len(not) == 0 {
+				delete(expect, ip)
+				continue
+			}
+
+			m := make(map[string]bool)
+			for _, imp := range not {
+				if !valid[ip][imp] {
+					t.Fatalf("%s is not a reachable import of %s, even in the all case", imp, ip)
+				}
+				m[imp] = true
+			}
+
+			drop[ip] = m
+		}
+
+		for ip, pkgs := range expect {
+			var npkgs []string
+			for _, imp := range pkgs {
+				if !drop[ip][imp] {
+					npkgs = append(npkgs, imp)
+				}
+			}
+
+			expect[ip] = npkgs
+		}
+	}
+
+	// first, validate all
+	name = "all"
+	main, tests = true, true
+	except()
+	validate()
+
+	// turn off main pkgs, which necessarily doesn't affect anything else
+	name = "no main"
+	main = false
+	except("varied")
+	validate()
+
+	// ignoring the "varied" pkg has same effect as disabling main pkgs
+	name = "ignore root"
+	ignore = map[string]bool{
+		"varied": true,
+	}
+	main = true
+	validate()
+
+	// when we drop tests, varied/otherpath loses its link to varied/m1p and
+	// varied/simple/another loses its test import, which has a fairly big
+	// cascade
+	name = "no tests"
+	tests = false
+	ignore = nil
+	except(
+		"varied encoding/binary",
+		"varied/simple encoding/binary",
+		"varied/simple/another encoding/binary",
+		"varied/otherpath github.com/sdboyer/gps os sort",
+	)
+	validate() // FIX: the "no tests" case set up expectations but previously fell through to the next case without ever being checked
+	// almost the same as previous, but varied just goes away completely
+	name = "no main or tests"
+	main = false
+	except(
+		"varied",
+		"varied/simple encoding/binary",
+		"varied/simple/another encoding/binary",
+		"varied/otherpath github.com/sdboyer/gps os sort",
+	)
+	validate()
+
+	// focus on ignores now, so reset main and tests
+	main, tests = true, true
+
+	// now, the fun stuff. punch a hole in the middle by cutting out
+	// varied/simple
+	name = "ignore varied/simple"
+	ignore = map[string]bool{
+		"varied/simple": true,
+	}
+	except(
+		// root pkg loses on everything in varied/simple/another
+		"varied hash encoding/binary go/parser",
+		"varied/simple",
+	)
+	validate()
+
+	// widen the hole by excluding otherpath
+	name = "ignore varied/{otherpath,simple}"
+	ignore = map[string]bool{
+		"varied/otherpath": true,
+		"varied/simple":    true,
+	}
+	except(
+		// root pkg loses on everything in varied/simple/another and varied/m1p
+		"varied hash encoding/binary go/parser github.com/sdboyer/gps sort",
+		"varied/otherpath",
+		"varied/simple",
+	)
+	validate()
+
+	// remove namemismatch, though we're mostly beating a dead horse now
+	name = "ignore varied/{otherpath,simple,namemismatch}"
+	ignore["varied/namemismatch"] = true
+	except(
+		// root pkg loses on everything in varied/simple/another and varied/m1p
+		"varied hash encoding/binary go/parser github.com/sdboyer/gps sort os github.com/Masterminds/semver",
+		"varied/otherpath",
+		"varied/simple",
+		"varied/namemismatch",
+	)
+	validate()
+}
+
+var _ = map[string][]string{ // NOTE(review): dead code — unused duplicate of the "all" map inside TestExternalReach; looks like leftover scratch, candidate for deletion
+	"varied":                {"encoding/binary", "github.com/Masterminds/semver", "github.com/sdboyer/gps", "go/parser", "hash", "net/http", "os", "sort"},
+	"varied/m1p":            {"github.com/sdboyer/gps", "os", "sort"},
+	"varied/namemismatch":   {"github.com/Masterminds/semver", "os"},
+	"varied/otherpath":      {"github.com/sdboyer/gps", "os", "sort"},
+	"varied/simple":         {"encoding/binary", "github.com/sdboyer/gps", "go/parser", "hash", "os", "sort"},
+	"varied/simple/another": {"encoding/binary", "github.com/sdboyer/gps", "hash", "os", "sort"},
+}
+
+func getwd(t *testing.T) string { // test helper: returns the current working directory, failing the test outright if it cannot be determined
+	cwd, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+	return cwd
+}
diff --git a/vendor/github.com/sdboyer/gps/appveyor.yml b/vendor/github.com/sdboyer/gps/appveyor.yml
new file mode 100644
index 0000000..8c6b1fd
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/appveyor.yml
@@ -0,0 +1,25 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\sdboyer\gps
+shallow_clone: true
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+install:
+  - go version
+  - go env
+  - choco install bzr
+  - set PATH=C:\Program Files (x86)\Bazaar\;C:\Program Files\Mercurial\;%PATH%
+build_script:
+  - go get github.com/Masterminds/glide
+  - C:\gopath\bin\glide install
+
+test_script:
+  - go test
+  - go build example.go
+
+deploy: off
diff --git a/vendor/github.com/sdboyer/gps/bridge.go b/vendor/github.com/sdboyer/gps/bridge.go
new file mode 100644
index 0000000..379cd4b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/bridge.go
@@ -0,0 +1,513 @@
+package gps
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"sync/atomic"
+
+	"github.com/Masterminds/semver"
+)
+
+// sourceBridges provide an adapter to SourceManagers that tailor operations
+// for a single solve run.
+type sourceBridge interface {
+	SourceManager // composes SourceManager
+	verifyRootDir(path string) error
+	pairRevision(id ProjectIdentifier, r Revision) []Version
+	pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion
+	vendorCodeExists(id ProjectIdentifier) (bool, error)
+	matches(id ProjectIdentifier, c Constraint, v Version) bool
+	matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool
+	intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint
+	breakLock()
+}
+
+// bridge is an adapter around a proper SourceManager. It provides localized
+// caching that's tailored to the requirements of a particular solve run.
+//
+// Finally, it provides authoritative version/constraint operations, ensuring
+// that any possible approach to a match - even those not literally encoded in
+// the inputs - is achieved.
+type bridge struct {
+	// The underlying, adapted-to SourceManager
+	sm SourceManager
+
+	// The solver which we're assisting.
+	//
+	// The link between solver and bridge is circular, which is typically a bit
+	// awkward, but the bridge needs access to so many of the input arguments
+	// held by the solver that it ends up being easier and saner to do this.
+	s *solver
+
+	// Simple, local cache of the root's PackageTree
+	crp *struct {
+		ptree PackageTree
+		err   error
+	}
+
+	// Map of project root name to their available version list. This cache is
+	// layered on top of the proper SourceManager's cache; the only difference
+	// is that this keeps the versions sorted in the direction required by the
+	// current solve run
+	vlists map[ProjectIdentifier][]Version
+
+	// Indicates whether lock breaking has already been run
+	lockbroken int32
+}
+
+// Global factory func to create a bridge. This exists solely to allow tests to
+// override it with a custom bridge and sm.
+var mkBridge func(*solver, SourceManager) sourceBridge = func(s *solver, sm SourceManager) sourceBridge {
+	return &bridge{
+		sm:     sm,
+		s:      s,
+		vlists: make(map[ProjectIdentifier][]Version),
+	}
+}
+
+func (b *bridge) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
+	if id.ProjectRoot == ProjectRoot(b.s.rpt.ImportRoot) {
+		return b.s.rm, b.s.rl, nil
+	}
+	return b.sm.GetManifestAndLock(id, v)
+}
+
+func (b *bridge) AnalyzerInfo() (string, *semver.Version) {
+	return b.sm.AnalyzerInfo()
+}
+
+func (b *bridge) ListVersions(id ProjectIdentifier) ([]Version, error) {
+	if vl, exists := b.vlists[id]; exists {
+		return vl, nil
+	}
+
+	vl, err := b.sm.ListVersions(id)
+	// TODO(sdboyer) cache errors, too?
+	if err != nil {
+		return nil, err
+	}
+
+	if b.s.params.Downgrade {
+		SortForDowngrade(vl)
+	} else {
+		SortForUpgrade(vl)
+	}
+
+	b.vlists[id] = vl
+	return vl, nil
+}
+
+func (b *bridge) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
+	return b.sm.RevisionPresentIn(id, r)
+}
+
+func (b *bridge) SourceExists(id ProjectIdentifier) (bool, error) {
+	return b.sm.SourceExists(id)
+}
+
+func (b *bridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
+	fi, err := os.Stat(filepath.Join(b.s.params.RootDir, "vendor", string(id.ProjectRoot)))
+	if err != nil {
+		return false, err
+	} else if fi.IsDir() {
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (b *bridge) pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion {
+	vl, err := b.ListVersions(id)
+	if err != nil {
+		return nil
+	}
+
+	// doing it like this is a bit sloppy
+	for _, v2 := range vl {
+		if p, ok := v2.(PairedVersion); ok {
+			if p.Matches(v) {
+				return p
+			}
+		}
+	}
+
+	return nil
+}
+
+func (b *bridge) pairRevision(id ProjectIdentifier, r Revision) []Version {
+	vl, err := b.ListVersions(id)
+	if err != nil {
+		return nil
+	}
+
+	p := []Version{r}
+	// doing it like this is a bit sloppy
+	for _, v2 := range vl {
+		if pv, ok := v2.(PairedVersion); ok {
+			if pv.Matches(r) {
+				p = append(p, pv)
+			}
+		}
+	}
+
+	return p
+}
+
+// matches performs a typical match check between the provided version and
+// constraint. If that basic check fails and the provided version is incomplete
+// (e.g. an unpaired version or bare revision), it will attempt to gather more
+// information on one or the other and re-perform the comparison.
+func (b *bridge) matches(id ProjectIdentifier, c2 Constraint, v Version) bool {
+	if c2.Matches(v) {
+		return true
+	}
+
+	// There's a wide field of possible ways that pairing might result in a
+	// match. For each possible type of version, start by carving out all the
+	// cases where the constraint would have provided an authoritative match
+	// result.
+	switch tv := v.(type) {
+	case PairedVersion:
+		switch tc := c2.(type) {
+		case PairedVersion, Revision, noneConstraint:
+			// These three would all have been authoritative matches
+			return false
+		case UnpairedVersion:
+			// Only way paired and unpaired could match is if they share an
+			// underlying rev
+			pv := b.pairVersion(id, tc)
+			if pv == nil {
+				return false
+			}
+			return pv.Matches(v)
+		case semverConstraint:
+			// Have to check all the possible versions for that rev to see if
+			// any match the semver constraint
+			for _, pv := range b.pairRevision(id, tv.Underlying()) {
+				if tc.Matches(pv) {
+					return true
+				}
+			}
+			return false
+		}
+
+	case Revision:
+		switch tc := c2.(type) {
+		case PairedVersion, Revision, noneConstraint:
+			// These three would all have been authoritative matches
+			return false
+		case UnpairedVersion:
+			// The only way a bare rev and an unpaired version could match is
+			// if pairing the unpaired version yields this same underlying rev
+			pv := b.pairVersion(id, tc)
+			if pv == nil {
+				return false
+			}
+			return pv.Matches(v)
+		case semverConstraint:
+			// Have to check all the possible versions for the rev to see if
+			// any match the semver constraint
+			for _, pv := range b.pairRevision(id, tv) {
+				if tc.Matches(pv) {
+					return true
+				}
+			}
+			return false
+		}
+
+	// UnpairedVersion as input has the most weird cases. It's also the one
+	// we'll probably see the least
+	case UnpairedVersion:
+		switch tc := c2.(type) {
+		case noneConstraint:
+			// obviously
+			return false
+		case Revision, PairedVersion:
+			// Easy case for both - just pair the uv and check the result
+			// against the rev or paired-version constraint
+			pv := b.pairVersion(id, tv)
+			if pv == nil {
+				return false
+			}
+			return tc.Matches(pv)
+		case UnpairedVersion:
+			// Both are unpaired versions. See if they share an underlying rev.
+			pv := b.pairVersion(id, tv)
+			if pv == nil {
+				return false
+			}
+
+			pc := b.pairVersion(id, tc)
+			if pc == nil {
+				return false
+			}
+			return pc.Matches(pv)
+
+		case semverConstraint:
+			// semverConstraint can't ever match a rev, but we do need to check
+			// if any other versions corresponding to this rev work.
+			pv := b.pairVersion(id, tv)
+			if pv == nil {
+				return false
+			}
+
+			for _, ttv := range b.pairRevision(id, pv.Underlying()) {
+				if c2.Matches(ttv) {
+					return true
+				}
+			}
+			return false
+		}
+	default:
+		panic("unreachable")
+	}
+
+	return false
+}
+
+// matchesAny is the authoritative version of Constraint.MatchesAny.
+func (b *bridge) matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool {
+	if c1.MatchesAny(c2) {
+		return true
+	}
+
+	// This approach is slightly wasteful, but just SO much less verbose, and
+	// more easily understood.
+	var uc1, uc2 Constraint
+	if v1, ok := c1.(Version); ok {
+		uc1 = b.vtu(id, v1)
+	} else {
+		uc1 = c1
+	}
+
+	if v2, ok := c2.(Version); ok {
+		uc2 = b.vtu(id, v2)
+	} else {
+		uc2 = c2
+	}
+
+	return uc1.MatchesAny(uc2)
+}
+
+// intersect is the authoritative version of Constraint.Intersect.
+func (b *bridge) intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint {
+	rc := c1.Intersect(c2)
+	if rc != none {
+		return rc
+	}
+
+	// This approach is slightly wasteful, but just SO much less verbose, and
+	// more easily understood.
+	var uc1, uc2 Constraint
+	if v1, ok := c1.(Version); ok {
+		uc1 = b.vtu(id, v1)
+	} else {
+		uc1 = c1
+	}
+
+	if v2, ok := c2.(Version); ok {
+		uc2 = b.vtu(id, v2)
+	} else {
+		uc2 = c2
+	}
+
+	return uc1.Intersect(uc2)
+}
+
+// vtu creates a versionTypeUnion for the provided version.
+//
+// This union may (and typically will) end up being nothing more than the single
+// input version, but creating a versionTypeUnion guarantees that 'local'
+// constraint checks (direct method calls) are authoritative.
+func (b *bridge) vtu(id ProjectIdentifier, v Version) versionTypeUnion {
+	switch tv := v.(type) {
+	case Revision:
+		return versionTypeUnion(b.pairRevision(id, tv))
+	case PairedVersion:
+		return versionTypeUnion(b.pairRevision(id, tv.Underlying()))
+	case UnpairedVersion:
+		pv := b.pairVersion(id, tv)
+		if pv == nil {
+			return versionTypeUnion{tv}
+		}
+
+		return versionTypeUnion(b.pairRevision(id, pv.Underlying()))
+	}
+
+	return nil
+}
+
+// listPackages lists all the packages contained within the given project at a
+// particular version.
+//
+// The root project is handled separately, as the source manager isn't
+// responsible for that code.
+func (b *bridge) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	if id.ProjectRoot == ProjectRoot(b.s.rpt.ImportRoot) {
+		panic("should never call ListPackages on root project")
+	}
+
+	return b.sm.ListPackages(id, v)
+}
+
+func (b *bridge) ExportProject(id ProjectIdentifier, v Version, path string) error {
+	//return b.sm.ExportProject(id, v, path)
+	panic("bridge should never be used to ExportProject")
+}
+
+// verifyRoot ensures that the provided path to the project root is in good
+// working condition. This check is made only once, at the beginning of a solve
+// run.
+func (b *bridge) verifyRootDir(path string) error {
+	if fi, err := os.Stat(path); err != nil {
+		return badOptsFailure(fmt.Sprintf("could not read project root (%s): %s", path, err))
+	} else if !fi.IsDir() {
+		return badOptsFailure(fmt.Sprintf("project root (%s) is a file, not a directory", path))
+	}
+
+	return nil
+}
+
+func (b *bridge) DeduceProjectRoot(ip string) (ProjectRoot, error) {
+	return b.sm.DeduceProjectRoot(ip)
+}
+
+// breakLock is called when the solver has to break a version recorded in the
+// lock file. It prefetches all the projects in the solver's lock, so that the
+// information is already on hand if/when the solver needs it.
+//
+// Projects that have already been selected are skipped, as it's generally unlikely that the
+// solver will have to backtrack through and fully populate their version queues.
+func (b *bridge) breakLock() {
+	// No real conceivable circumstance in which multiple calls are made to
+	// this, but being that this is the entrance point to a bunch of async work,
+	// protect it with an atomic CAS in case things change in the future.
+	if !atomic.CompareAndSwapInt32(&b.lockbroken, 0, 1) {
+		return
+	}
+
+	for _, lp := range b.s.rl.Projects() {
+		if _, is := b.s.sel.selected(lp.pi); !is {
+			// ListPackages guarantees that all the necessary network work will
+			// be done, so go with that
+			//
+			// TODO(sdboyer) use this as an opportunity to detect
+			// inconsistencies between upstream and the lock (e.g., moved tags)?
+			pi, v := lp.pi, lp.Version()
+			go func() {
+				// Sync first
+				b.sm.SyncSourceFor(pi)
+				// Preload the package info for the locked version, too, as
+				// we're more likely to need that
+				b.sm.ListPackages(pi, v)
+			}()
+		}
+	}
+}
+
+func (b *bridge) SyncSourceFor(id ProjectIdentifier) error {
+	return b.sm.SyncSourceFor(id)
+}
+
+// versionTypeUnion represents a set of versions that are, within the scope of
+// this solver run, equivalent.
+//
+// The simple case here is just a pair - a normal version plus its underlying
+// revision - but if a tag or branch point at the same rev, then we consider
+// them equivalent. Again, however, this equivalency is short-lived; it must be
+// re-assessed during every solver run.
+//
+// The union members are treated as being OR'd together: all constraint
+// operations attempt each member, and will take the most open/optimistic
+// answer.
+//
+// This technically does allow tags to match branches - something we
+// otherwise try hard to avoid - but because the original input constraint never
+// actually changes (and is never written out in the Result), there's no harmful
+// case of a user suddenly riding a branch when they expected a fixed tag.
+type versionTypeUnion []Version
+
+// This should generally not be called, but is required for the interface. If it
+// is called, we have a bigger problem (the type has escaped the solver); thus,
+// panic.
+func (av versionTypeUnion) String() string {
+	panic("versionTypeUnion should never be turned into a string; it is solver internal-only")
+}
+
+// This should generally not be called, but is required for the interface. If it
+// is called, we have a bigger problem (the type has escaped the solver); thus,
+// panic.
+func (av versionTypeUnion) Type() string {
+	panic("versionTypeUnion should never need to answer a Type() call; it is solver internal-only")
+}
+
+// Matches takes a version, and returns true if that version matches any version
+// contained in the union.
+//
+// This DOES allow tags to match branches, albeit indirectly through a revision.
+func (av versionTypeUnion) Matches(v Version) bool {
+	av2, oav := v.(versionTypeUnion)
+
+	for _, v1 := range av {
+		if oav {
+			for _, v2 := range av2 {
+				if v1.Matches(v2) {
+					return true
+				}
+			}
+		} else if v1.Matches(v) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// MatchesAny returns true if any of the contained versions (which are also
+// constraints) in the union successfully MatchAny with the provided
+// constraint.
+func (av versionTypeUnion) MatchesAny(c Constraint) bool {
+	av2, oav := c.(versionTypeUnion)
+
+	for _, v1 := range av {
+		if oav {
+			for _, v2 := range av2 {
+				if v1.MatchesAny(v2) {
+					return true
+				}
+			}
+		} else if v1.MatchesAny(c) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// Intersect takes a constraint, and attempts to intersect it with all the
+// versions contained in the union until one returns non-none. If that never
+// happens, then none is returned.
+//
+// In order to avoid weird version floating elsewhere in the solver, the union
+// always returns the input constraint. (This is probably obviously correct, but
+// is still worth noting.)
+func (av versionTypeUnion) Intersect(c Constraint) Constraint {
+	av2, oav := c.(versionTypeUnion)
+
+	for _, v1 := range av {
+		if oav {
+			for _, v2 := range av2 {
+				if rc := v1.Intersect(v2); rc != none {
+					return rc
+				}
+			}
+		} else if rc := v1.Intersect(c); rc != none {
+			return rc
+		}
+	}
+
+	return none
+}
+
+func (av versionTypeUnion) _private() {}
diff --git a/vendor/github.com/sdboyer/gps/circle.yml b/vendor/github.com/sdboyer/gps/circle.yml
new file mode 100644
index 0000000..8be1609
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/circle.yml
@@ -0,0 +1,23 @@
+machine:
+  environment:
+    GO15VENDOREXPERIMENT: 1
+    PROJECT_ROOT: "github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME"
+    RD: "$HOME/.go_workspace/src/$PROJECT_ROOT"
+dependencies:
+  pre:
+    - wget https://github.com/Masterminds/glide/releases/download/0.10.1/glide-0.10.1-linux-amd64.tar.gz
+    - tar -vxz -C $HOME/bin --strip=1 -f glide-0.10.1-linux-amd64.tar.gz
+  override:
+    - glide --home $HOME/.glide -y glide.yaml install --cache
+    - mkdir -p $RD
+    - rsync -azC --delete ./ $RD
+    #- ln -Tsf "$HOME/$CIRCLE_PROJECT_REPONAME" "$HOME/.go_workspace/src/$PROJECT_ROOT"
+  cache_directories:
+    - "~/.glide"
+test:
+  pre:
+    - go vet
+  override:
+    - cd $RD && go test -v -coverprofile=coverage.txt -covermode=atomic
+    - cd $RD && go build example.go
+    - cd $RD && bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/sdboyer/gps/codecov.yml b/vendor/github.com/sdboyer/gps/codecov.yml
new file mode 100644
index 0000000..cdc5202
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/codecov.yml
@@ -0,0 +1,6 @@
+coverage:
+  ignore:
+  - remove_go16.go
+  - remove_go17.go
+  - solve_failures.go
+  - discovery.go # copied from stdlib, don't need to test
diff --git a/vendor/github.com/sdboyer/gps/constraint_test.go b/vendor/github.com/sdboyer/gps/constraint_test.go
new file mode 100644
index 0000000..3863e65
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/constraint_test.go
@@ -0,0 +1,817 @@
+package gps
+
+import (
+	"fmt"
+	"testing"
+)
+
+// gu - helper func for stringifying what we assume is a VersionPair (otherwise
+// will panic), but is given as a Constraint
+func gu(v Constraint) string {
+	return fmt.Sprintf("%q at rev %q", v, v.(PairedVersion).Underlying())
+}
+
+func TestBranchConstraintOps(t *testing.T) {
+	v1 := NewBranch("master").(branchVersion)
+	v2 := NewBranch("test").(branchVersion)
+
+	if !v1.MatchesAny(any) {
+		t.Errorf("Branches should always match the any constraint")
+	}
+	if v1.Intersect(any) != v1 {
+		t.Errorf("Branches should always return self when intersecting the any constraint, but got %s", v1.Intersect(any))
+	}
+
+	if v1.MatchesAny(none) {
+		t.Errorf("Branches should never match the none constraint")
+	}
+	if v1.Intersect(none) != none {
+		t.Errorf("Branches should always return none when intersecting the none constraint, but got %s", v1.Intersect(none))
+	}
+
+	if v1.Matches(v2) {
+		t.Errorf("%s should not match %s", v1, v2)
+	}
+
+	if v1.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v1, v2)
+	}
+
+	if v1.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v1, v2)
+	}
+
+	// Add rev to one
+	snuffster := Revision("snuffleupagus")
+	v3 := v1.Is(snuffster).(versionPair)
+	if v2.Matches(v3) {
+		t.Errorf("%s should not match %s", v2, gu(v3))
+	}
+	if v3.Matches(v2) {
+		t.Errorf("%s should not match %s", gu(v3), v2)
+	}
+
+	if v2.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+	if v3.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+
+	if v2.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3))
+	}
+	if v3.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2)
+	}
+
+	// Add different rev to the other
+	v4 := v2.Is(Revision("cookie monster")).(versionPair)
+	if v4.Matches(v3) {
+		t.Errorf("%s should not match %s", gu(v4), gu(v3))
+	}
+	if v3.Matches(v4) {
+		t.Errorf("%s should not match %s", gu(v3), gu(v4))
+	}
+
+	if v4.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+	if v3.MatchesAny(v4) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+
+	if v4.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3))
+	}
+	if v3.Intersect(v4) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4))
+	}
+
+	// Now add same rev to different branches
+	// TODO(sdboyer) this might not actually be a good idea, when you consider the
+	// semantics of floating versions...matching on an underlying rev might be
+	// nice in the short term, but it's probably shit most of the time
+	v5 := v2.Is(Revision("snuffleupagus")).(versionPair)
+	if !v5.Matches(v3) {
+		t.Errorf("%s should match %s", gu(v5), gu(v3))
+	}
+	if !v3.Matches(v5) {
+		t.Errorf("%s should match %s", gu(v3), gu(v5))
+	}
+
+	if !v5.MatchesAny(v3) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+	if !v3.MatchesAny(v5) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+
+	if v5.Intersect(v3) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3))
+	}
+	if v3.Intersect(v5) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5))
+	}
+
+	// Set up for cross-type constraint ops
+	cookie := Revision("cookie monster")
+	o1 := NewVersion("master").(plainVersion)
+	o2 := NewVersion("1.0.0").(semVersion)
+	o3 := o1.Is(cookie).(versionPair)
+	o4 := o2.Is(cookie).(versionPair)
+	v6 := v1.Is(cookie).(versionPair)
+
+	if v1.Matches(o1) {
+		t.Errorf("%s (branch) should not match %s (version) across types", v1, o1)
+	}
+
+	if v1.MatchesAny(o1) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (version)", v1, o1)
+	}
+
+	if v1.Intersect(o1) != none {
+		t.Errorf("Intersection of %s (branch) with %s (version) should result in empty set", v1, o1)
+	}
+
+	if v1.Matches(o2) {
+		t.Errorf("%s (branch) should not match %s (semver) across types", v1, o2)
+	}
+
+	if v1.MatchesAny(o2) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (semver)", v1, o2)
+	}
+
+	if v1.Intersect(o2) != none {
+		t.Errorf("Intersection of %s (branch) with %s (semver) should result in empty set", v1, o2)
+	}
+
+	if v1.Matches(o3) {
+		t.Errorf("%s (branch) should not match %s (version) across types", v1, gu(o3))
+	}
+
+	if v1.MatchesAny(o3) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (version)", v1, gu(o3))
+	}
+
+	if v1.Intersect(o3) != none {
+		t.Errorf("Intersection of %s (branch) with %s (version) should result in empty set", v1, gu(o3))
+	}
+
+	if v1.Matches(o4) {
+		t.Errorf("%s (branch) should not match %s (semver) across types", v1, gu(o4))
+	}
+
+	if v1.MatchesAny(o4) {
+		t.Errorf("%s (branch) should not allow any matches when combined with %s (semver)", v1, gu(o4))
+	}
+
+	if v1.Intersect(o4) != none {
+		t.Errorf("Intersection of %s (branch) with %s (semver) should result in empty set", v1, gu(o4))
+	}
+
+	if !v6.Matches(o3) {
+		t.Errorf("%s (branch) should match %s (version) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if !v6.MatchesAny(o3) {
+		t.Errorf("%s (branch) should allow some matches when combined with %s (version) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if v6.Intersect(o3) != cookie {
+		t.Errorf("Intersection of %s (branch) with %s (version) should return shared underlying rev", gu(v6), gu(o3))
+	}
+
+	if !v6.Matches(o4) {
+		t.Errorf("%s (branch) should match %s (version) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if !v6.MatchesAny(o4) {
+		t.Errorf("%s (branch) should allow some matches when combined with %s (version) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if v6.Intersect(o4) != cookie {
+		t.Errorf("Intersection of %s (branch) with %s (version) should return shared underlying rev", gu(v6), gu(o4))
+	}
+}
+
+func TestVersionConstraintOps(t *testing.T) {
+	v1 := NewVersion("ab123").(plainVersion)
+	v2 := NewVersion("b2a13").(plainVersion)
+
+	if !v1.MatchesAny(any) {
+		t.Errorf("Versions should always match the any constraint")
+	}
+	if v1.Intersect(any) != v1 {
+		t.Errorf("Versions should always return self when intersecting the any constraint, but got %s", v1.Intersect(any))
+	}
+
+	if v1.MatchesAny(none) {
+		t.Errorf("Versions should never match the none constraint")
+	}
+	if v1.Intersect(none) != none {
+		t.Errorf("Versions should always return none when intersecting the none constraint, but got %s", v1.Intersect(none))
+	}
+
+	if v1.Matches(v2) {
+		t.Errorf("%s should not match %s", v1, v2)
+	}
+
+	if v1.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v1, v2)
+	}
+
+	if v1.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v1, v2)
+	}
+
+	// Add rev to one
+	snuffster := Revision("snuffleupagus")
+	v3 := v1.Is(snuffster).(versionPair)
+	if v2.Matches(v3) {
+		t.Errorf("%s should not match %s", v2, gu(v3))
+	}
+	if v3.Matches(v2) {
+		t.Errorf("%s should not match %s", gu(v3), v2)
+	}
+
+	if v2.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+	if v3.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+
+	if v2.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3))
+	}
+	if v3.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2)
+	}
+
+	// Add different rev to the other
+	v4 := v2.Is(Revision("cookie monster")).(versionPair)
+	if v4.Matches(v3) {
+		t.Errorf("%s should not match %s", gu(v4), gu(v3))
+	}
+	if v3.Matches(v4) {
+		t.Errorf("%s should not match %s", gu(v3), gu(v4))
+	}
+
+	if v4.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+	if v3.MatchesAny(v4) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+
+	if v4.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3))
+	}
+	if v3.Intersect(v4) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4))
+	}
+
+	// Now add same rev to different versions, and things should line up
+	v5 := v2.Is(Revision("snuffleupagus")).(versionPair)
+	if !v5.Matches(v3) {
+		t.Errorf("%s should match %s", gu(v5), gu(v3))
+	}
+	if !v3.Matches(v5) {
+		t.Errorf("%s should match %s", gu(v3), gu(v5))
+	}
+
+	if !v5.MatchesAny(v3) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+	if !v3.MatchesAny(v5) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+
+	if v5.Intersect(v3) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3))
+	}
+	if v3.Intersect(v5) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5))
+	}
+
+	// Set up for cross-type constraint ops
+	cookie := Revision("cookie monster")
+	o1 := NewBranch("master").(branchVersion)
+	o2 := NewVersion("1.0.0").(semVersion)
+	o3 := o1.Is(cookie).(versionPair)
+	o4 := o2.Is(cookie).(versionPair)
+	v6 := v1.Is(cookie).(versionPair)
+
+	if v1.Matches(o1) {
+		t.Errorf("%s (version) should not match %s (branch) across types", v1, o1)
+	}
+
+	if v1.MatchesAny(o1) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (branch)", v1, o1)
+	}
+
+	if v1.Intersect(o1) != none {
+		t.Errorf("Intersection of %s (version) with %s (branch) should result in empty set", v1, o1)
+	}
+
+	if v1.Matches(o2) {
+		t.Errorf("%s (version) should not match %s (semver) across types", v1, o2)
+	}
+
+	if v1.MatchesAny(o2) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (semver)", v1, o2)
+	}
+
+	if v1.Intersect(o2) != none {
+		t.Errorf("Intersection of %s (version) with %s (semver) should result in empty set", v1, o2)
+	}
+
+	if v1.Matches(o3) {
+		t.Errorf("%s (version) should not match %s (branch) across types", v1, gu(o3))
+	}
+
+	if v1.MatchesAny(o3) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (branch)", v1, gu(o3))
+	}
+
+	if v1.Intersect(o3) != none {
+		t.Errorf("Intersection of %s (version) with %s (branch) should result in empty set", v1, gu(o3))
+	}
+
+	if v1.Matches(o4) {
+		t.Errorf("%s (version) should not match %s (semver) across types", v1, gu(o4))
+	}
+
+	if v1.MatchesAny(o4) {
+		t.Errorf("%s (version) should not allow any matches when combined with %s (semver)", v1, gu(o4))
+	}
+
+	if v1.Intersect(o4) != none {
+		t.Errorf("Intersection of %s (version) with %s (semver) should result in empty set", v1, gu(o4))
+	}
+
+	if !v6.Matches(o3) {
+		t.Errorf("%s (version) should match %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if !v6.MatchesAny(o3) {
+		t.Errorf("%s (version) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if v6.Intersect(o3) != cookie {
+		t.Errorf("Intersection of %s (version) with %s (branch) should return shared underlying rev", gu(v6), gu(o3))
+	}
+
+	if !v6.Matches(o4) {
+		t.Errorf("%s (version) should match %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if !v6.MatchesAny(o4) {
+		t.Errorf("%s (version) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if v6.Intersect(o4) != cookie {
+		t.Errorf("Intersection of %s (version) with %s (branch) should return shared underlying rev", gu(v6), gu(o4))
+	}
+}
+
+func TestSemverVersionConstraintOps(t *testing.T) {
+	v1 := NewVersion("1.0.0").(semVersion)
+	v2 := NewVersion("2.0.0").(semVersion)
+
+	if !v1.MatchesAny(any) {
+		t.Errorf("Semvers should always match the any constraint")
+	}
+	if v1.Intersect(any) != v1 {
+		t.Errorf("Semvers should always return self when intersecting the any constraint, but got %s", v1.Intersect(any))
+	}
+
+	if v1.MatchesAny(none) {
+		t.Errorf("Semvers should never match the none constraint")
+	}
+	if v1.Intersect(none) != none {
+		t.Errorf("Semvers should always return none when intersecting the none constraint, but got %s", v1.Intersect(none))
+	}
+
+	if v1.Matches(v2) {
+		t.Errorf("%s should not match %s", v1, v2)
+	}
+
+	if v1.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v1, v2)
+	}
+
+	if v1.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v1, v2)
+	}
+
+	// Add rev to one
+	snuffster := Revision("snuffleupagus")
+	v3 := v1.Is(snuffster).(versionPair)
+	if v2.Matches(v3) {
+		t.Errorf("%s should not match %s", v2, gu(v3))
+	}
+	if v3.Matches(v2) {
+		t.Errorf("%s should not match %s", gu(v3), v2)
+	}
+
+	if v2.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+	if v3.MatchesAny(v2) {
+		t.Errorf("%s should not allow any matches when combined with %s", v2, gu(v3))
+	}
+
+	if v2.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", v2, gu(v3))
+	}
+	if v3.Intersect(v2) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), v2)
+	}
+
+	// Add different rev to the other
+	v4 := v2.Is(Revision("cookie monster")).(versionPair)
+	if v4.Matches(v3) {
+		t.Errorf("%s should not match %s", gu(v4), gu(v3))
+	}
+	if v3.Matches(v4) {
+		t.Errorf("%s should not match %s", gu(v3), gu(v4))
+	}
+
+	if v4.MatchesAny(v3) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+	if v3.MatchesAny(v4) {
+		t.Errorf("%s should not allow any matches when combined with %s", gu(v4), gu(v3))
+	}
+
+	if v4.Intersect(v3) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v4), gu(v3))
+	}
+	if v3.Intersect(v4) != none {
+		t.Errorf("Intersection of %s with %s should result in empty set", gu(v3), gu(v4))
+	}
+
+	// Now add same rev to different versions, and things should line up
+	v5 := v2.Is(Revision("snuffleupagus")).(versionPair)
+	if !v5.Matches(v3) {
+		t.Errorf("%s should match %s", gu(v5), gu(v3))
+	}
+	if !v3.Matches(v5) {
+		t.Errorf("%s should match %s", gu(v3), gu(v5))
+	}
+
+	if !v5.MatchesAny(v3) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+	if !v3.MatchesAny(v5) {
+		t.Errorf("%s should allow some matches when combined with %s", gu(v5), gu(v3))
+	}
+
+	if v5.Intersect(v3) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v5), gu(v3))
+	}
+	if v3.Intersect(v5) != snuffster {
+		t.Errorf("Intersection of %s with %s should return underlying rev", gu(v3), gu(v5))
+	}
+
+	// Set up for cross-type constraint ops
+	cookie := Revision("cookie monster")
+	o1 := NewBranch("master").(branchVersion)
+	o2 := NewVersion("ab123").(plainVersion)
+	o3 := o1.Is(cookie).(versionPair)
+	o4 := o2.Is(cookie).(versionPair)
+	v6 := v1.Is(cookie).(versionPair)
+
+	if v1.Matches(o1) {
+		t.Errorf("%s (semver) should not match %s (branch) across types", v1, o1)
+	}
+
+	if v1.MatchesAny(o1) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (branch)", v1, o1)
+	}
+
+	if v1.Intersect(o1) != none {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should result in empty set", v1, o1)
+	}
+
+	if v1.Matches(o2) {
+		t.Errorf("%s (semver) should not match %s (version) across types", v1, o2)
+	}
+
+	if v1.MatchesAny(o2) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (version)", v1, o2)
+	}
+
+	if v1.Intersect(o2) != none {
+		t.Errorf("Intersection of %s (semver) with %s (version) should result in empty set", v1, o2)
+	}
+
+	if v1.Matches(o3) {
+		t.Errorf("%s (semver) should not match %s (branch) across types", v1, gu(o3))
+	}
+
+	if v1.MatchesAny(o3) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (branch)", v1, gu(o3))
+	}
+
+	if v1.Intersect(o3) != none {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should result in empty set", v1, gu(o3))
+	}
+
+	if v1.Matches(o4) {
+		t.Errorf("%s (semver) should not match %s (version) across types", v1, gu(o4))
+	}
+
+	if v1.MatchesAny(o4) {
+		t.Errorf("%s (semver) should not allow any matches when combined with %s (version)", v1, gu(o4))
+	}
+
+	if v1.Intersect(o4) != none {
+		t.Errorf("Intersection of %s (semver) with %s (version) should result in empty set", v1, gu(o4))
+	}
+
+	if !v6.Matches(o3) {
+		t.Errorf("%s (semver) should match %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if !v6.MatchesAny(o3) {
+		t.Errorf("%s (semver) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o3))
+	}
+
+	if v6.Intersect(o3) != cookie {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should return shared underlying rev", gu(v6), gu(o3))
+	}
+
+	if !v6.Matches(o4) {
+		t.Errorf("%s (semver) should match %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if !v6.MatchesAny(o4) {
+		t.Errorf("%s (semver) should allow some matches when combined with %s (branch) across types due to shared rev", gu(v6), gu(o4))
+	}
+
+	if v6.Intersect(o4) != cookie {
+		t.Errorf("Intersection of %s (semver) with %s (branch) should return shared underlying rev", gu(v6), gu(o4))
+	}
+
+	// Regression check - make sure that semVersion -> semverConstraint works
+	// the same as verified in the other test
+	c1, _ := NewSemverConstraint("=1.0.0")
+	if !v1.MatchesAny(c1) {
+		t.Errorf("%s (semver) should allow some matches - itself - when combined with an equivalent semverConstraint", gu(v1))
+	}
+	if v1.Intersect(c1) != v1 {
+		t.Errorf("Intersection of %s (semver) with equivalent semver constraint should return self, got %s", gu(v1), v1.Intersect(c1))
+	}
+
+	if !v6.MatchesAny(c1) {
+		t.Errorf("%s (semver pair) should allow some matches - itself - when combined with an equivalent semverConstraint", gu(v6))
+	}
+	if v6.Intersect(c1) != v6 {
+		t.Errorf("Intersection of %s (semver pair) with equivalent semver constraint should return self, got %s", gu(v6), v6.Intersect(c1))
+	}
+
+}
+
+// The other test is about the semverVersion, this is about semverConstraint
+func TestSemverConstraintOps(t *testing.T) {
+	// One version of each underlying flavor: branch, plain string, semver.
+	v1 := NewBranch("master").(branchVersion)
+	v2 := NewVersion("ab123").(plainVersion)
+	v3 := NewVersion("1.0.0").(semVersion)
+
+	// The same three versions, paired with an underlying revision.
+	fozzie := Revision("fozzie bear")
+	v4 := v1.Is(fozzie).(versionPair)
+	v5 := v2.Is(fozzie).(versionPair)
+	v6 := v3.Is(fozzie).(versionPair)
+
+	// TODO(sdboyer) we can't use the same range as below b/c semver.rangeConstraint is
+	// still an incomparable type
+	c1, err := NewSemverConstraint("=1.0.0")
+	if err != nil {
+		t.Errorf("Failed to create constraint: %s", err)
+		t.FailNow()
+	}
+
+	// Interactions with the universal any/none constraints.
+	if !c1.MatchesAny(any) {
+		t.Errorf("Semver constraints should always match the any constraint")
+	}
+	if c1.Intersect(any) != c1 {
+		t.Errorf("Semver constraints should always return self when intersecting the any constraint, but got %s", c1.Intersect(any))
+	}
+
+	if c1.MatchesAny(none) {
+		t.Errorf("Semver constraints should never match the none constraint")
+	}
+	if c1.Intersect(none) != none {
+		t.Errorf("Semver constraints should always return none when intersecting the none constraint, but got %s", c1.Intersect(none))
+	}
+
+	// Switch to a range constraint for the version-type checks below.
+	c1, err = NewSemverConstraint(">= 1.0.0")
+	if err != nil {
+		t.Errorf("Failed to create constraint: %s", err)
+		t.FailNow()
+	}
+
+	// Matches: only semver versions (bare or paired) can satisfy the range.
+	if c1.Matches(v1) {
+		t.Errorf("Semver constraint should not match simple branch")
+	}
+	if c1.Matches(v2) {
+		t.Errorf("Semver constraint should not match simple version")
+	}
+	if !c1.Matches(v3) {
+		t.Errorf("Semver constraint should match a simple semver version in its range")
+	}
+	if c1.Matches(v4) {
+		t.Errorf("Semver constraint should not match paired branch")
+	}
+	if c1.Matches(v5) {
+		t.Errorf("Semver constraint should not match paired version")
+	}
+	if !c1.Matches(v6) {
+		t.Errorf("Semver constraint should match a paired semver version in its range")
+	}
+
+	// MatchesAny: mirrors the Matches results above.
+	if c1.MatchesAny(v1) {
+		t.Errorf("Semver constraint should not allow any when intersected with simple branch")
+	}
+	if c1.MatchesAny(v2) {
+		t.Errorf("Semver constraint should not allow any when intersected with simple version")
+	}
+	if !c1.MatchesAny(v3) {
+		t.Errorf("Semver constraint should allow some when intersected with a simple semver version in its range")
+	}
+	if c1.MatchesAny(v4) {
+		t.Errorf("Semver constraint should not allow any when intersected with paired branch")
+	}
+	if c1.MatchesAny(v5) {
+		t.Errorf("Semver constraint should not allow any when intersected with paired version")
+	}
+	if !c1.MatchesAny(v6) {
+		t.Errorf("Semver constraint should allow some when intersected with a paired semver version in its range")
+	}
+
+	// Intersect: none for non-semver inputs, the input itself when in range.
+	if c1.Intersect(v1) != none {
+		t.Errorf("Semver constraint should return none when intersected with a simple branch")
+	}
+	if c1.Intersect(v2) != none {
+		t.Errorf("Semver constraint should return none when intersected with a simple version")
+	}
+	if c1.Intersect(v3) != v3 {
+		t.Errorf("Semver constraint should return input when intersected with a simple semver version in its range")
+	}
+	if c1.Intersect(v4) != none {
+		t.Errorf("Semver constraint should return none when intersected with a paired branch")
+	}
+	if c1.Intersect(v5) != none {
+		t.Errorf("Semver constraint should return none when intersected with a paired version")
+	}
+	if c1.Intersect(v6) != v6 {
+		t.Errorf("Semver constraint should return input when intersected with a paired semver version in its range")
+	}
+}
+
+// Test that certain types of cross-version comparisons work when they are
+// expressed as a version union (but that others don't).
+func TestVersionUnion(t *testing.T) {
+	rev := Revision("flooboofoobooo")
+	v1 := NewBranch("master")
+	v2 := NewBranch("test")
+	v3 := NewVersion("1.0.0").Is(rev)
+	v4 := NewVersion("1.0.1")
+	v5 := NewVersion("v2.0.5").Is(Revision("notamatch"))
+
+	// Union of a branch, a semver version, and a bare revision.
+	uv1 := versionTypeUnion{v1, v4, rev}
+
+	if uv1.MatchesAny(none) {
+		t.Errorf("Union can't match none")
+	}
+	if none.MatchesAny(uv1) {
+		t.Errorf("Union can't match none")
+	}
+
+	if !uv1.MatchesAny(any) {
+		t.Errorf("Union must match any")
+	}
+	if !any.MatchesAny(uv1) {
+		t.Errorf("Union must match any")
+	}
+
+	// Basic matching
+	// v4 is a plain semver version contained in the union; the old messages
+	// mislabeled this case as "branch to branch".
+	if !uv1.Matches(v4) {
+		t.Errorf("Union should match on version to version")
+	}
+	if !v4.Matches(uv1) {
+		t.Errorf("Union should reverse-match on version to version")
+	}
+
+	if !uv1.Matches(v3) {
+		t.Errorf("Union should match on rev to paired rev")
+	}
+	if !v3.Matches(uv1) {
+		t.Errorf("Union should reverse-match on rev to paired rev")
+	}
+
+	if uv1.Matches(v2) {
+		t.Errorf("Union should not match on anything in disjoint unpaired")
+	}
+	if v2.Matches(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint unpaired")
+	}
+
+	if uv1.Matches(v5) {
+		t.Errorf("Union should not match on anything in disjoint pair")
+	}
+	if v5.Matches(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint pair")
+	}
+
+	// MatchesAny - repeat Matches for safety, but add more, too
+	if !uv1.MatchesAny(v4) {
+		t.Errorf("Union should match on version to version")
+	}
+	if !v4.MatchesAny(uv1) {
+		t.Errorf("Union should reverse-match on version to version")
+	}
+
+	if !uv1.MatchesAny(v3) {
+		t.Errorf("Union should match on rev to paired rev")
+	}
+	if !v3.MatchesAny(uv1) {
+		t.Errorf("Union should reverse-match on rev to paired rev")
+	}
+
+	if uv1.MatchesAny(v2) {
+		t.Errorf("Union should not match on anything in disjoint unpaired")
+	}
+	if v2.MatchesAny(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint unpaired")
+	}
+
+	if uv1.MatchesAny(v5) {
+		t.Errorf("Union should not match on anything in disjoint pair")
+	}
+	if v5.MatchesAny(uv1) {
+		t.Errorf("Union should not reverse-match on anything in disjoint pair")
+	}
+
+	c1, _ := NewSemverConstraint("~1.0.0")
+	c2, _ := NewSemverConstraint("~2.0.0")
+	if !uv1.MatchesAny(c1) {
+		t.Errorf("Union should have some overlap due to containing 1.0.1 version")
+	}
+	if !c1.MatchesAny(uv1) {
+		t.Errorf("Union should have some overlap due to containing 1.0.1 version")
+	}
+
+	if uv1.MatchesAny(c2) {
+		t.Errorf("Union should have no overlap with ~2.0.0 semver range")
+	}
+	if c2.MatchesAny(uv1) {
+		t.Errorf("Union should have no overlap with ~2.0.0 semver range")
+	}
+
+	// Intersect - repeat all previous
+	if uv1.Intersect(v4) != v4 {
+		t.Errorf("Union intersection on contained version should return that version")
+	}
+	if v4.Intersect(uv1) != v4 {
+		t.Errorf("Union reverse-intersection on contained version should return that version")
+	}
+
+	if uv1.Intersect(v3) != rev {
+		t.Errorf("Union intersection on paired version w/matching rev should return rev, got %s", uv1.Intersect(v3))
+	}
+	if v3.Intersect(uv1) != rev {
+		t.Errorf("Union reverse-intersection on paired version w/matching rev should return rev, got %s", v3.Intersect(uv1))
+	}
+
+	if uv1.Intersect(v2) != none {
+		t.Errorf("Union should not intersect with anything in disjoint unpaired")
+	}
+	if v2.Intersect(uv1) != none {
+		t.Errorf("Union should not reverse-intersect with anything in disjoint unpaired")
+	}
+
+	if uv1.Intersect(v5) != none {
+		t.Errorf("Union should not intersect with anything in disjoint pair")
+	}
+	if v5.Intersect(uv1) != none {
+		t.Errorf("Union should not reverse-intersect with anything in disjoint pair")
+	}
+
+	if uv1.Intersect(c1) != v4 {
+		t.Errorf("Union intersecting with semver range should return 1.0.1 version, got %s", uv1.Intersect(c1))
+	}
+	if c1.Intersect(uv1) != v4 {
+		t.Errorf("Union reverse-intersecting with semver range should return 1.0.1 version, got %s", c1.Intersect(uv1))
+	}
+
+	if uv1.Intersect(c2) != none {
+		t.Errorf("Union intersecting with non-overlapping semver range should return none, got %s", uv1.Intersect(c2))
+	}
+	if c2.Intersect(uv1) != none {
+		// The failure message previously printed uv1.Intersect(c2) — the
+		// forward direction — instead of the value actually under test.
+		t.Errorf("Union reverse-intersecting with non-overlapping semver range should return none, got %s", c2.Intersect(uv1))
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/constraints.go b/vendor/github.com/sdboyer/gps/constraints.go
new file mode 100644
index 0000000..cf1b484
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/constraints.go
@@ -0,0 +1,295 @@
+package gps
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/Masterminds/semver"
+)
+
+var (
+	none = noneConstraint{}
+	any  = anyConstraint{}
+)
+
+// A Constraint provides structured limitations on the versions that are
+// admissible for a given project.
+//
+// As with Version, it has a private method because the gps's internal
+// implementation of the problem is complete, and the system relies on type
+// magic to operate.
+type Constraint interface {
+	fmt.Stringer
+	// Matches indicates if the provided Version is allowed by the Constraint.
+	Matches(Version) bool
+	// MatchesAny indicates if the intersection of the Constraint with the
+	// provided Constraint would yield a Constraint that could allow *any*
+	// Version.
+	MatchesAny(Constraint) bool
+	// Intersect computes the intersection of the Constraint with the provided
+	// Constraint.
+	Intersect(Constraint) Constraint
+	_private()
+}
+
+// _private is implemented by each in-package constraint type solely to seal
+// the Constraint interface against implementations from other packages.
+func (semverConstraint) _private() {}
+func (anyConstraint) _private()    {}
+func (noneConstraint) _private()   {}
+
+// NewSemverConstraint attempts to construct a semver Constraint object from the
+// input string.
+//
+// If the input string cannot be made into a valid semver Constraint, an error
+// is returned.
+func NewSemverConstraint(body string) (Constraint, error) {
+	c, err := semver.NewConstraint(body)
+	if err != nil {
+		return nil, err
+	}
+	// If we got a simple semver.Version, simplify by returning our
+	// corresponding type
+	if sv, ok := c.(*semver.Version); ok {
+		return semVersion{sv: sv}, nil
+	}
+	// Otherwise, wrap the range constraint in our adapter type.
+	return semverConstraint{c: c}, nil
+}
+
+// semverConstraint adapts a Masterminds/semver range constraint (e.g.
+// ">= 1.0.0") to the gps Constraint interface.
+type semverConstraint struct {
+	c semver.Constraint
+}
+
+func (c semverConstraint) String() string {
+	return c.c.String()
+}
+
+// Matches reports whether v satisfies the underlying semver range. Only
+// semver versions — bare, paired with a revision, or inside a version type
+// union — can match; all other version types fall through to false.
+func (c semverConstraint) Matches(v Version) bool {
+	switch tv := v.(type) {
+	case versionTypeUnion:
+		for _, elem := range tv {
+			if c.Matches(elem) {
+				return true
+			}
+		}
+	case semVersion:
+		return c.c.Matches(tv.sv) == nil
+	case versionPair:
+		if tv2, ok := tv.v.(semVersion); ok {
+			return c.c.Matches(tv2.sv) == nil
+		}
+	}
+
+	return false
+}
+
+// MatchesAny reports whether the intersection with c2 is non-empty.
+func (c semverConstraint) MatchesAny(c2 Constraint) bool {
+	return c.Intersect(c2) != none
+}
+
+// Intersect computes the intersection of the semver range with c2, returning
+// none when the two cannot admit any common version.
+func (c semverConstraint) Intersect(c2 Constraint) Constraint {
+	switch tc := c2.(type) {
+	case anyConstraint:
+		return c
+	case versionTypeUnion:
+		// First non-empty intersection with a union member wins.
+		for _, elem := range tc {
+			if rc := c.Intersect(elem); rc != none {
+				return rc
+			}
+		}
+	case semverConstraint:
+		rc := c.c.Intersect(tc.c)
+		if !semver.IsNone(rc) {
+			return semverConstraint{c: rc}
+		}
+	case semVersion:
+		rc := c.c.Intersect(tc.sv)
+		if !semver.IsNone(rc) {
+			// If single version intersected with constraint, we know the result
+			// must be the single version, so just return it back out
+			return c2
+		}
+	case versionPair:
+		if tc2, ok := tc.v.(semVersion); ok {
+			rc := c.c.Intersect(tc2.sv)
+			if !semver.IsNone(rc) {
+				// same reasoning as previous case
+				return c2
+			}
+		}
+	}
+
+	return none
+}
+
+// IsAny indicates if the provided constraint is the wildcard "Any" constraint.
+func IsAny(c Constraint) bool {
+	_, ok := c.(anyConstraint)
+	return ok
+}
+
+// Any returns a constraint that will match anything.
+//
+// Note: the returned value compares equal to the package-level `any` sentinel.
+func Any() Constraint {
+	return anyConstraint{}
+}
+
+// anyConstraint is an unbounded constraint - it matches all other types of
+// constraints. It mirrors the behavior of the semver package's any type.
+type anyConstraint struct{}
+
+func (anyConstraint) String() string {
+	return "*"
+}
+
+// Matches always succeeds: every version is admissible under "any".
+func (anyConstraint) Matches(Version) bool {
+	return true
+}
+
+func (anyConstraint) MatchesAny(Constraint) bool {
+	return true
+}
+
+// Intersect with "any" is the identity: the other constraint is returned.
+func (anyConstraint) Intersect(c Constraint) Constraint {
+	return c
+}
+
+// noneConstraint is the empty set - it matches no versions. It mirrors the
+// behavior of the semver package's none type.
+type noneConstraint struct{}
+
+func (noneConstraint) String() string {
+	return ""
+}
+
+// Matches always fails: no version is admissible under "none".
+func (noneConstraint) Matches(Version) bool {
+	return false
+}
+
+func (noneConstraint) MatchesAny(Constraint) bool {
+	return false
+}
+
+// Intersect with "none" is absorbing: the result is always none.
+func (noneConstraint) Intersect(Constraint) Constraint {
+	return none
+}
+
+// A ProjectConstraint combines a ProjectIdentifier with a Constraint. It
+// indicates that, if packages contained in the ProjectIdentifier enter the
+// depgraph, they must do so at a version that is allowed by the Constraint.
+type ProjectConstraint struct {
+	Ident      ProjectIdentifier
+	Constraint Constraint
+}
+
+// workingConstraint is a ProjectConstraint that additionally records whether
+// its network name and/or constraint were replaced by an override.
+type workingConstraint struct {
+	Ident                     ProjectIdentifier
+	Constraint                Constraint
+	overrNet, overrConstraint bool
+}
+
+// ProjectConstraints maps project roots to the properties (network name and
+// constraint) that should govern them.
+type ProjectConstraints map[ProjectRoot]ProjectProperties
+
+// pcSliceToMap converts one or more []ProjectConstraint into a single
+// ProjectConstraints map. Entries from l are taken as-is; when a root recurs
+// in any of the r slices, its constraint is intersected with what is already
+// recorded (the NetworkName of the first occurrence wins).
+func pcSliceToMap(l []ProjectConstraint, r ...[]ProjectConstraint) ProjectConstraints {
+	final := make(ProjectConstraints)
+
+	for _, pc := range l {
+		final[pc.Ident.ProjectRoot] = ProjectProperties{
+			NetworkName: pc.Ident.NetworkName,
+			Constraint:  pc.Constraint,
+		}
+	}
+
+	for _, pcs := range r {
+		for _, pc := range pcs {
+			if pp, exists := final[pc.Ident.ProjectRoot]; exists {
+				// Technically this should be done through a bridge for
+				// cross-version-type matching...but this is a one off for root and
+				// that's just ridiculous for this.
+				pp.Constraint = pp.Constraint.Intersect(pc.Constraint)
+				final[pc.Ident.ProjectRoot] = pp
+			} else {
+				final[pc.Ident.ProjectRoot] = ProjectProperties{
+					NetworkName: pc.Ident.NetworkName,
+					Constraint:  pc.Constraint,
+				}
+			}
+		}
+	}
+
+	return final
+}
+
+// asSortedSlice flattens the map back into a []ProjectConstraint, stably
+// sorted by project identifier (map iteration order is random, so sorting is
+// required for deterministic output).
+func (m ProjectConstraints) asSortedSlice() []ProjectConstraint {
+	pcs := make([]ProjectConstraint, len(m))
+
+	k := 0
+	for pr, pp := range m {
+		pcs[k] = ProjectConstraint{
+			Ident: ProjectIdentifier{
+				ProjectRoot: pr,
+				NetworkName: pp.NetworkName,
+			},
+			Constraint: pp.Constraint,
+		}
+		k++
+	}
+
+	sort.Stable(sortedConstraints(pcs))
+	return pcs
+}
+
+// overrideAll treats the ProjectConstraints map as an override map, and applies
+// overridden values to the input.
+//
+// A slice of workingConstraint is returned, allowing differentiation between
+// values that were or were not overridden.
+func (m ProjectConstraints) overrideAll(in []ProjectConstraint) (out []workingConstraint) {
+	out = make([]workingConstraint, len(in))
+	// Use the range index directly; the previous manually-incremented
+	// counter always tracked it exactly.
+	for k, pc := range in {
+		out[k] = m.override(pc)
+	}
+
+	return
+}
+
+// override replaces a single ProjectConstraint with a workingConstraint,
+// overriding its values if a corresponding entry exists in the
+// ProjectConstraints map.
+func (m ProjectConstraints) override(pc ProjectConstraint) workingConstraint {
+	// Start from the input values; only replace what the override specifies.
+	wc := workingConstraint{
+		Ident:      pc.Ident,
+		Constraint: pc.Constraint,
+	}
+
+	if pp, has := m[pc.Ident.ProjectRoot]; has {
+		// The rule for overrides is that *any* non-zero value for the prop
+		// should be considered an override, even if it's equal to what's
+		// already there.
+		if pp.Constraint != nil {
+			wc.Constraint = pp.Constraint
+			wc.overrConstraint = true
+		}
+
+		if pp.NetworkName != "" {
+			wc.Ident.NetworkName = pp.NetworkName
+			wc.overrNet = true
+		}
+
+	}
+
+	return wc
+}
+
+// sortedConstraints implements sort.Interface over []ProjectConstraint,
+// ordering by project identifier.
+type sortedConstraints []ProjectConstraint
+
+func (s sortedConstraints) Len() int {
+	return len(s)
+}
+
+func (s sortedConstraints) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s sortedConstraints) Less(i, j int) bool {
+	return s[i].Ident.less(s[j].Ident)
+}
diff --git a/vendor/github.com/sdboyer/gps/deduce.go b/vendor/github.com/sdboyer/gps/deduce.go
new file mode 100644
index 0000000..25dc93d
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/deduce.go
@@ -0,0 +1,777 @@
+package gps
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"regexp"
+	"strings"
+)
+
+var (
+	gitSchemes = []string{"https", "ssh", "git", "http"}
+	bzrSchemes = []string{"https", "bzr+ssh", "bzr", "http"}
+	hgSchemes  = []string{"https", "ssh", "http"}
+	svnSchemes = []string{"https", "http", "svn", "svn+ssh"}
+)
+
+// validateVCSScheme reports whether scheme is an acceptable URL scheme for
+// the given vcs type ("git", "bzr", "hg", or "svn"). It panics on any other
+// typ, which indicates a programmer error in the caller.
+func validateVCSScheme(scheme, typ string) bool {
+	// everything allows plain ssh
+	if scheme == "ssh" {
+		return true
+	}
+
+	var schemes []string
+	switch typ {
+	case "git":
+		schemes = gitSchemes
+	case "bzr":
+		schemes = bzrSchemes
+	case "hg":
+		schemes = hgSchemes
+	case "svn":
+		schemes = svnSchemes
+	default:
+		// Was fmt.Sprint("unsupported vcs type", scheme): it reported the
+		// scheme instead of the unrecognized type, and fmt.Sprint adds no
+		// space between adjacent string operands.
+		panic(fmt.Sprintf("unsupported vcs type %q", typ))
+	}
+
+	for _, valid := range schemes {
+		if scheme == valid {
+			return true
+		}
+	}
+	return false
+}
+
+// Regexes for the different known import path flavors
+var (
+	// This regex allowed some usernames that github currently disallows. They
+	// may have allowed them in the past; keeping it in case we need to revert.
+	//ghRegex      = regexp.MustCompile(`^(?P<root>github\.com/([A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`)
+	ghRegex      = regexp.MustCompile(`^(?P<root>github\.com(/[A-Za-z0-9][-A-Za-z0-9]*[A-Za-z0-9]/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	gpinNewRegex = regexp.MustCompile(`^(?P<root>gopkg\.in(?:(/[a-zA-Z0-9][-a-zA-Z0-9]+)?)(/[a-zA-Z][-.a-zA-Z0-9]*)\.((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(?:-unstable)?)(?:\.git)?)((?:/[a-zA-Z0-9][-.a-zA-Z0-9]*)*)$`)
+	//gpinOldRegex = regexp.MustCompile(`^(?P<root>gopkg\.in/(?:([a-z0-9][-a-z0-9]+)/)?((?:v0|v[1-9][0-9]*)(?:\.0|\.[1-9][0-9]*){0,2}(-unstable)?)/([a-zA-Z][-a-zA-Z0-9]*)(?:\.git)?)((?:/[a-zA-Z][-a-zA-Z0-9]*)*)$`)
+	bbRegex = regexp.MustCompile(`^(?P<root>bitbucket\.org(?P<bitname>/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	//lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net/([A-Za-z0-9-._]+)(/[A-Za-z0-9-._]+)?)(/.+)?`)
+	lpRegex = regexp.MustCompile(`^(?P<root>launchpad\.net(/[A-Za-z0-9-._]+))((?:/[A-Za-z0-9_.\-]+)*)?`)
+	//glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net/([A-Za-z0-9_.\-]+)|~[A-Za-z0-9_.\-]+/(\+git|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+)$`)
+	glpRegex = regexp.MustCompile(`^(?P<root>git\.launchpad\.net(/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	//gcRegex      = regexp.MustCompile(`^(?P<root>code\.google\.com/[pr]/(?P<project>[a-z0-9\-]+)(\.(?P<subrepo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`)
+	jazzRegex         = regexp.MustCompile(`^(?P<root>hub\.jazz\.net(/git/[a-z0-9]+/[A-Za-z0-9_.\-]+))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	apacheRegex       = regexp.MustCompile(`^(?P<root>git\.apache\.org(/[a-z0-9_.\-]+\.git))((?:/[A-Za-z0-9_.\-]+)*)$`)
+	vcsExtensionRegex = regexp.MustCompile(`^(?P<root>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?/[A-Za-z0-9_.\-/~]*?\.(?P<vcs>bzr|git|hg|svn))((?:/[A-Za-z0-9_.\-]+)*)$`)
+)
+
+// Other helper regexes
+var (
+	scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
+	pathvld     = regexp.MustCompile(`^([A-Za-z0-9-]+)(\.[A-Za-z0-9-]+)+(/[A-Za-z0-9-_.~]+)*$`)
+)
+
+// pathDeducerTrie builds the prefix trie mapping well-known hosting prefixes
+// to their specialized path deducers. Paths that match none of these prefixes
+// are presumably handled elsewhere (e.g. vcs-extension or metadata lookup).
+func pathDeducerTrie() deducerTrie {
+	dxt := newDeducerTrie()
+
+	dxt.Insert("github.com/", githubDeducer{regexp: ghRegex})
+	dxt.Insert("gopkg.in/", gopkginDeducer{regexp: gpinNewRegex})
+	dxt.Insert("bitbucket.org/", bitbucketDeducer{regexp: bbRegex})
+	dxt.Insert("launchpad.net/", launchpadDeducer{regexp: lpRegex})
+	dxt.Insert("git.launchpad.net/", launchpadGitDeducer{regexp: glpRegex})
+	dxt.Insert("hub.jazz.net/", jazzDeducer{regexp: jazzRegex})
+	dxt.Insert("git.apache.org/", apacheDeducer{regexp: apacheRegex})
+
+	return dxt
+}
+
+// pathDeducer extracts the repository root and candidate sources from an
+// import path (optionally qualified by an explicit URL).
+type pathDeducer interface {
+	deduceRoot(string) (string, error)
+	deduceSource(string, *url.URL) (maybeSource, error)
+}
+
+// githubDeducer handles import paths under github.com, which are always
+// backed by git.
+type githubDeducer struct {
+	regexp *regexp.Regexp
+}
+
+// deduceRoot extracts the "github.com/user/repo" root from path.
+func (m githubDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on github.com", path)
+	}
+
+	return "github.com" + v[2], nil
+}
+
+// deduceSource builds git source candidates for path. If u carries an
+// explicit scheme, it is validated and a single source is returned;
+// otherwise one candidate per known git scheme is produced.
+func (m githubDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on github.com", path)
+	}
+
+	u.Host = "github.com"
+	u.Path = v[2]
+
+	if u.Scheme == "ssh" && u.User != nil && u.User.Username() != "git" {
+		return nil, fmt.Errorf("github ssh must be accessed via the 'git' user; %s was provided", u.User.Username())
+	} else if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "git") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+		}
+		if u.Scheme == "ssh" {
+			u.User = url.User("git")
+		}
+		return maybeGitSource{url: u}, nil
+	}
+
+	// No scheme given: fan out one candidate per known git scheme.
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		if scheme == "ssh" {
+			u2.User = url.User("git")
+		}
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+// bitbucketDeducer handles import paths under bitbucket.org, which may be
+// backed by either git or hg; the ambiguity is resolved heuristically below.
+type bitbucketDeducer struct {
+	regexp *regexp.Regexp
+}
+
+// deduceRoot extracts the "bitbucket.org/user/repo" root from path.
+func (m bitbucketDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on bitbucket.org", path)
+	}
+
+	return "bitbucket.org" + v[2], nil
+}
+
+// deduceSource builds git and/or hg source candidates for path, narrowing
+// to one vcs when the path suffix or ssh user hints at it.
+func (m bitbucketDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on bitbucket.org", path)
+	}
+
+	u.Host = "bitbucket.org"
+	u.Path = v[2]
+
+	// This isn't definitive, but it'll probably catch most
+	isgit := strings.HasSuffix(u.Path, ".git") || (u.User != nil && u.User.Username() == "git")
+	ishg := strings.HasSuffix(u.Path, ".hg") || (u.User != nil && u.User.Username() == "hg")
+
+	// TODO(sdboyer) resolve scm ambiguity if needed by querying bitbucket's REST API
+	if u.Scheme != "" {
+		validgit, validhg := validateVCSScheme(u.Scheme, "git"), validateVCSScheme(u.Scheme, "hg")
+		if isgit {
+			if !validgit {
+				// This is unreachable for now, as the git schemes are a
+				// superset of the hg schemes
+				return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+			}
+			return maybeGitSource{url: u}, nil
+		} else if ishg {
+			if !validhg {
+				return nil, fmt.Errorf("%s is not a valid scheme for accessing an hg repository", u.Scheme)
+			}
+			return maybeHgSource{url: u}, nil
+		} else if !validgit && !validhg {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing either a git or hg repository", u.Scheme)
+		}
+
+		// No other choice, make an option for both git and hg
+		return maybeSources{
+			maybeHgSource{url: u},
+			maybeGitSource{url: u},
+		}, nil
+	}
+
+	mb := make(maybeSources, 0)
+	// git is probably more common, even on bitbucket. however, bitbucket
+	// appears to fail _extremely_ slowly on git pings (ls-remote) when the
+	// underlying repository is actually an hg repository, so it's better
+	// to try hg first.
+	if !isgit {
+		for _, scheme := range hgSchemes {
+			u2 := *u
+			if scheme == "ssh" {
+				u2.User = url.User("hg")
+			}
+			u2.Scheme = scheme
+			mb = append(mb, maybeHgSource{url: &u2})
+		}
+	}
+
+	if !ishg {
+		for _, scheme := range gitSchemes {
+			u2 := *u
+			if scheme == "ssh" {
+				u2.User = url.User("git")
+			}
+			u2.Scheme = scheme
+			mb = append(mb, maybeGitSource{url: &u2})
+		}
+	}
+
+	return mb, nil
+}
+
+// gopkginDeducer handles import paths under gopkg.in, a versioned redirector
+// service whose repositories actually live on github.com.
+type gopkginDeducer struct {
+	regexp *regexp.Regexp
+}
+
+// deduceRoot extracts the versioned gopkg.in root from p.
+func (m gopkginDeducer) deduceRoot(p string) (string, error) {
+	v, err := m.parseAndValidatePath(p)
+	if err != nil {
+		return "", err
+	}
+
+	return v[1], nil
+}
+
+// parseAndValidatePath matches p against the gopkg.in regex and additionally
+// rejects minor/patch components in the version segment, returning the raw
+// submatch slice on success.
+func (m gopkginDeducer) parseAndValidatePath(p string) ([]string, error) {
+	v := m.regexp.FindStringSubmatch(p)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on gopkg.in", p)
+	}
+
+	// We duplicate some logic from the gopkg.in server in order to validate the
+	// import path string without having to make a network request
+	if strings.Contains(v[4], ".") {
+		return nil, fmt.Errorf("%s is not a valid import path; gopkg.in only allows major versions (%q instead of %q)",
+			p, v[4][:strings.Index(v[4], ".")], v[4])
+	}
+
+	return v, nil
+}
+
+// deduceSource maps the gopkg.in path onto its backing github.com repository
+// and builds one git source candidate per known scheme.
+func (m gopkginDeducer) deduceSource(p string, u *url.URL) (maybeSource, error) {
+	// Reuse root detection logic for initial validation
+	v, err := m.parseAndValidatePath(p)
+	if err != nil {
+		return nil, err
+	}
+
+	// Putting a scheme on gopkg.in would be really weird, disallow it
+	if u.Scheme != "" {
+		return nil, fmt.Errorf("Specifying alternate schemes on gopkg.in imports is not permitted")
+	}
+
+	// gopkg.in is always backed by github
+	u.Host = "github.com"
+	if v[2] == "" {
+		// No user segment: gopkg.in/pkg.vN maps to github.com/go-pkg/pkg.
+		elem := v[3][1:]
+		u.Path = path.Join("/go-"+elem, elem)
+	} else {
+		u.Path = path.Join(v[2], v[3])
+	}
+
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		if scheme == "ssh" {
+			u2.User = url.User("git")
+		}
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+// launchpadDeducer handles import paths under launchpad.net, which are backed
+// by bzr.
+type launchpadDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m launchpadDeducer) deduceRoot(path string) (string, error) {
+	// TODO(sdboyer) lp handling is nasty - there's ambiguities which can only really
+	// be resolved with a metadata request. See https://github.com/golang/go/issues/11436
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on launchpad.net", path)
+	}
+
+	return "launchpad.net" + v[2], nil
+}
+
+// deduceSource builds bzr source candidates for path; with an explicit
+// scheme a single validated source is returned.
+func (m launchpadDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on launchpad.net", path)
+	}
+
+	u.Host = "launchpad.net"
+	u.Path = v[2]
+
+	if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "bzr") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a bzr repository", u.Scheme)
+		}
+		return maybeBzrSource{url: u}, nil
+	}
+
+	mb := make(maybeSources, len(bzrSchemes))
+	for k, scheme := range bzrSchemes {
+		u2 := *u
+		u2.Scheme = scheme
+		mb[k] = maybeBzrSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+// launchpadGitDeducer handles import paths under git.launchpad.net, the
+// git-backed sibling of launchpad.net.
+type launchpadGitDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m launchpadGitDeducer) deduceRoot(path string) (string, error) {
+	// TODO(sdboyer) same ambiguity issues as with normal bzr lp
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on git.launchpad.net", path)
+	}
+
+	return "git.launchpad.net" + v[2], nil
+}
+
+// deduceSource builds git source candidates for path; with an explicit
+// scheme a single validated source is returned.
+func (m launchpadGitDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on git.launchpad.net", path)
+	}
+
+	u.Host = "git.launchpad.net"
+	u.Path = v[2]
+
+	if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "git") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+		}
+		return maybeGitSource{url: u}, nil
+	}
+
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
+// jazzDeducer handles import paths under hub.jazz.net, which is git-backed
+// and only reachable over https.
+type jazzDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m jazzDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on hub.jazz.net", path)
+	}
+
+	return "hub.jazz.net" + v[2], nil
+}
+
+// deduceSource returns a single https git source; any scheme other than
+// https (or empty, which defaults to https) is rejected.
+func (m jazzDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on hub.jazz.net", path)
+	}
+
+	u.Host = "hub.jazz.net"
+	u.Path = v[2]
+
+	switch u.Scheme {
+	case "":
+		u.Scheme = "https"
+		fallthrough
+	case "https":
+		return maybeGitSource{url: u}, nil
+	default:
+		return nil, fmt.Errorf("IBM's jazz hub only supports https, %s is not allowed", u.String())
+	}
+}
+
+// apacheDeducer handles import paths under git.apache.org, which are backed
+// by git.
+type apacheDeducer struct {
+	regexp *regexp.Regexp
+}
+
+func (m apacheDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s is not a valid path for a source on git.apache.org", path)
+	}
+
+	return "git.apache.org" + v[2], nil
+}
+
+// deduceSource builds git source candidates for path; with an explicit
+// scheme a single validated source is returned.
+func (m apacheDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return nil, fmt.Errorf("%s is not a valid path for a source on git.apache.org", path)
+	}
+
+	u.Host = "git.apache.org"
+	u.Path = v[2]
+
+	if u.Scheme != "" {
+		if !validateVCSScheme(u.Scheme, "git") {
+			return nil, fmt.Errorf("%s is not a valid scheme for accessing a git repository", u.Scheme)
+		}
+		return maybeGitSource{url: u}, nil
+	}
+
+	mb := make(maybeSources, len(gitSchemes))
+	for k, scheme := range gitSchemes {
+		u2 := *u
+		u2.Scheme = scheme
+		mb[k] = maybeGitSource{url: &u2}
+	}
+
+	return mb, nil
+}
+
// vcsExtensionDeducer deduces sources for import paths that embed a vcs
// extension hint (".git", ".bzr", ".hg") somewhere in the path. regexp
// captures the root (through the extension) and the extension itself.
type vcsExtensionDeducer struct {
	regexp *regexp.Regexp
}
+
+func (m vcsExtensionDeducer) deduceRoot(path string) (string, error) {
+	v := m.regexp.FindStringSubmatch(path)
+	if v == nil {
+		return "", fmt.Errorf("%s contains no vcs extension hints for matching", path)
+	}
+
+	return v[1], nil
+}
+
// deduceSource resolves a path containing an embedded vcs-extension hint
// (".git", ".bzr", ".hg") into candidate source(s). With an explicit scheme,
// the scheme is validated against the hinted vcs and exactly one candidate is
// returned; otherwise one candidate per scheme that vcs supports is produced.
func (m vcsExtensionDeducer) deduceSource(path string, u *url.URL) (maybeSource, error) {
	v := m.regexp.FindStringSubmatch(path)
	if v == nil {
		return nil, fmt.Errorf("%s contains no vcs extension hints for matching", path)
	}

	switch v[4] {
	case "git", "hg", "bzr":
		// v[1] is host plus path through the extension; peel off the host.
		x := strings.SplitN(v[1], "/", 2)
		// TODO(sdboyer) is this actually correct for bzr?
		u.Host = x[0]
		u.Path = "/" + x[1]

		if u.Scheme != "" {
			if !validateVCSScheme(u.Scheme, v[4]) {
				return nil, fmt.Errorf("%s is not a valid scheme for accessing %s repositories (path %s)", u.Scheme, v[4], path)
			}

			// Explicit, valid scheme: a single candidate of the hinted type.
			switch v[4] {
			case "git":
				return maybeGitSource{url: u}, nil
			case "bzr":
				return maybeBzrSource{url: u}, nil
			case "hg":
				return maybeHgSource{url: u}, nil
			}
		}

		// No scheme: fan out across all schemes the hinted vcs supports.
		// The f closures capture mb; mb is allocated (with the correct
		// length) only after schemes is known, just before f is invoked.
		var schemes []string
		var mb maybeSources
		var f func(k int, u *url.URL)

		switch v[4] {
		case "git":
			schemes = gitSchemes
			f = func(k int, u *url.URL) {
				mb[k] = maybeGitSource{url: u}
			}
		case "bzr":
			schemes = bzrSchemes
			f = func(k int, u *url.URL) {
				mb[k] = maybeBzrSource{url: u}
			}
		case "hg":
			schemes = hgSchemes
			f = func(k int, u *url.URL) {
				mb[k] = maybeHgSource{url: u}
			}
		}

		mb = make(maybeSources, len(schemes))
		for k, scheme := range schemes {
			// Each candidate gets its own copy of the URL.
			u2 := *u
			u2.Scheme = scheme
			f(k, &u2)
		}

		return mb, nil
	default:
		return nil, fmt.Errorf("unknown repository type: %q", v[4])
	}
}
+
// stringFuture is a deferred computation resolving to a string (used here
// for deduced repository roots).
type stringFuture func() (string, error)

// sourceFuture is a deferred computation resolving to a live source and its
// identifying URL string.
type sourceFuture func() (source, string, error)

// partialSourceFuture binds a cache directory and analyzer, yielding the
// sourceFuture that will actually set up the source.
type partialSourceFuture func(string, ProjectAnalyzer) sourceFuture
+
// deductionFuture bundles the pair of futures produced by deduceFromPath:
// one for the project root, one (partially applied) for the source.
type deductionFuture struct {
	// rslow indicates that the root future may be a slow call (that it has to
	// hit the network for some reason)
	rslow bool
	// root resolves to the deduced repository root import path.
	root  stringFuture
	// psf, given a cache dir and analyzer, yields the source future.
	psf   partialSourceFuture
}
+
+// deduceFromPath takes an import path and attempts to deduce various
+// metadata about it - what type of source should handle it, and where its
+// "root" is (for vcs repositories, the repository root).
+//
+// The results are wrapped in futures, as most of these operations require at
+// least some network activity to complete. For the first return value, network
+// activity will be triggered when the future is called. For the second,
+// network activity is triggered only when calling the sourceFuture returned
+// from the partialSourceFuture.
+func (sm *SourceMgr) deduceFromPath(path string) (deductionFuture, error) {
+	opath := path
+	u, path, err := normalizeURI(path)
+	if err != nil {
+		return deductionFuture{}, err
+	}
+
+	// Helpers to futurize the results from deducers
+	strfut := func(s string) stringFuture {
+		return func() (string, error) {
+			return s, nil
+		}
+	}
+
+	srcfut := func(mb maybeSource) partialSourceFuture {
+		return func(cachedir string, an ProjectAnalyzer) sourceFuture {
+			var src source
+			var ident string
+			var err error
+
+			c := make(chan struct{}, 1)
+			go func() {
+				defer close(c)
+				src, ident, err = mb.try(cachedir, an)
+			}()
+
+			return func() (source, string, error) {
+				<-c
+				return src, ident, err
+			}
+		}
+	}
+
+	// First, try the root path-based matches
+	if _, mtchi, has := sm.dxt.LongestPrefix(path); has {
+		mtch := mtchi.(pathDeducer)
+		root, err := mtch.deduceRoot(path)
+		if err != nil {
+			return deductionFuture{}, err
+		}
+		mb, err := mtch.deduceSource(path, u)
+		if err != nil {
+			return deductionFuture{}, err
+		}
+
+		return deductionFuture{
+			rslow: false,
+			root:  strfut(root),
+			psf:   srcfut(mb),
+		}, nil
+	}
+
+	// Next, try the vcs extension-based (infix) matcher
+	exm := vcsExtensionDeducer{regexp: vcsExtensionRegex}
+	if root, err := exm.deduceRoot(path); err == nil {
+		mb, err := exm.deduceSource(path, u)
+		if err != nil {
+			return deductionFuture{}, err
+		}
+
+		return deductionFuture{
+			rslow: false,
+			root:  strfut(root),
+			psf:   srcfut(mb),
+		}, nil
+	}
+
+	// No luck so far. maybe it's one of them vanity imports?
+	// We have to get a little fancier for the metadata lookup by chaining the
+	// source future onto the metadata future
+
+	// Declare these out here so they're available for the source future
+	var vcs string
+	var ru *url.URL
+
+	// Kick off the vanity metadata fetch
+	var importroot string
+	var futerr error
+	c := make(chan struct{}, 1)
+	go func() {
+		defer close(c)
+		var reporoot string
+		importroot, vcs, reporoot, futerr = parseMetadata(path)
+		if futerr != nil {
+			futerr = fmt.Errorf("unable to deduce repository and source type for: %q", opath)
+			return
+		}
+
+		// If we got something back at all, then it supercedes the actual input for
+		// the real URL to hit
+		ru, futerr = url.Parse(reporoot)
+		if futerr != nil {
+			futerr = fmt.Errorf("server returned bad URL when searching for vanity import: %q", reporoot)
+			importroot = ""
+			return
+		}
+	}()
+
+	// Set up the root func to catch the result
+	root := func() (string, error) {
+		<-c
+		return importroot, futerr
+	}
+
+	src := func(cachedir string, an ProjectAnalyzer) sourceFuture {
+		var src source
+		var ident string
+		var err error
+
+		c := make(chan struct{}, 1)
+		go func() {
+			defer close(c)
+			// make sure the metadata future is finished (without errors), thus
+			// guaranteeing that ru and vcs will be populated
+			_, err := root()
+			if err != nil {
+				return
+			}
+			ident = ru.String()
+
+			var m maybeSource
+			switch vcs {
+			case "git":
+				m = maybeGitSource{url: ru}
+			case "bzr":
+				m = maybeBzrSource{url: ru}
+			case "hg":
+				m = maybeHgSource{url: ru}
+			}
+
+			if m != nil {
+				src, ident, err = m.try(cachedir, an)
+			} else {
+				err = fmt.Errorf("unsupported vcs type %s", vcs)
+			}
+		}()
+
+		return func() (source, string, error) {
+			<-c
+			return src, ident, err
+		}
+	}
+
+	return deductionFuture{
+		rslow: true,
+		root:  root,
+		psf:   src,
+	}, nil
+}
+
+func normalizeURI(p string) (u *url.URL, newpath string, err error) {
+	if m := scpSyntaxRe.FindStringSubmatch(p); m != nil {
+		// Match SCP-like syntax and convert it to a URL.
+		// Eg, "git@github.com:user/repo" becomes
+		// "ssh://git@github.com/user/repo".
+		u = &url.URL{
+			Scheme: "ssh",
+			User:   url.User(m[1]),
+			Host:   m[2],
+			Path:   "/" + m[3],
+			// TODO(sdboyer) This is what stdlib sets; grok why better
+			//RawPath: m[3],
+		}
+	} else {
+		u, err = url.Parse(p)
+		if err != nil {
+			return nil, "", fmt.Errorf("%q is not a valid URI", p)
+		}
+	}
+
+	// If no scheme was passed, then the entire path will have been put into
+	// u.Path. Either way, construct the normalized path correctly.
+	if u.Host == "" {
+		newpath = p
+	} else {
+		newpath = path.Join(u.Host, u.Path)
+	}
+
+	if !pathvld.MatchString(newpath) {
+		return nil, "", fmt.Errorf("%q is not a valid import path", newpath)
+	}
+
+	return
+}
+
+// fetchMetadata fetches the remote metadata for path.
+func fetchMetadata(path string) (rc io.ReadCloser, err error) {
+	defer func() {
+		if err != nil {
+			err = fmt.Errorf("unable to determine remote metadata protocol: %s", err)
+		}
+	}()
+
+	// try https first
+	rc, err = doFetchMetadata("https", path)
+	if err == nil {
+		return
+	}
+
+	rc, err = doFetchMetadata("http", path)
+	return
+}
+
// doFetchMetadata performs a single go-get metadata request for path over
// the given scheme ("http" or "https") and returns the raw response body.
// The caller is responsible for closing the returned body.
func doFetchMetadata(scheme, path string) (io.ReadCloser, error) {
	// Reject anything other than the two protocols go-get metadata uses.
	if scheme != "https" && scheme != "http" {
		return nil, fmt.Errorf("unknown remote protocol scheme: %q", scheme)
	}

	// target renamed from "url" to avoid shadowing the net/url package.
	target := fmt.Sprintf("%s://%s?go-get=1", scheme, path)
	resp, err := http.Get(target)
	if err != nil {
		return nil, fmt.Errorf("failed to access url %q", target)
	}
	return resp.Body, nil
}
+
+// parseMetadata fetches and decodes remote metadata for path.
+func parseMetadata(path string) (string, string, string, error) {
+	rc, err := fetchMetadata(path)
+	if err != nil {
+		return "", "", "", err
+	}
+	defer rc.Close()
+
+	imports, err := parseMetaGoImports(rc)
+	if err != nil {
+		return "", "", "", err
+	}
+	match := -1
+	for i, im := range imports {
+		if !strings.HasPrefix(path, im.Prefix) {
+			continue
+		}
+		if match != -1 {
+			return "", "", "", fmt.Errorf("multiple meta tags match import path %q", path)
+		}
+		match = i
+	}
+	if match == -1 {
+		return "", "", "", fmt.Errorf("go-import metadata not found")
+	}
+	return imports[match].Prefix, imports[match].VCS, imports[match].RepoRoot, nil
+}
diff --git a/vendor/github.com/sdboyer/gps/deduce_test.go b/vendor/github.com/sdboyer/gps/deduce_test.go
new file mode 100644
index 0000000..23ffe38
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/deduce_test.go
@@ -0,0 +1,619 @@
+package gps
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"net/url"
+	"reflect"
+	"sync"
+	"testing"
+)
+
// pathDeductionFixture describes one deduction test case: the input import
// path, the expected root and/or root-deduction error, and the expected
// candidate source(s) and/or source-deduction error.
type pathDeductionFixture struct {
	in     string      // import path fed to the deducer
	root   string      // expected deduced repository root
	rerr   error       // expected error from deduceRoot (compared by message)
	mb     maybeSource // expected candidate source(s) from deduceSource
	srcerr error       // expected error from deduceSource (compared by message)
}
+
// mkurl is a test helper that parses s into a *url.URL, panicking if s is
// not a valid URL (fixture URLs are expected to always be well-formed).
func mkurl(s string) *url.URL {
	u, err := url.Parse(s)
	if err != nil {
		// Sprintf rather than Sprint: fmt.Sprint inserts no separator
		// between adjacent string operands, which produced messages like
		// "string is not a valid URL:github.com/...".
		panic(fmt.Sprintf("string is not a valid URL: %s", s))
	}
	return u
}
+
+var pathDeductionFixtures = map[string][]pathDeductionFixture{
+	"github": []pathDeductionFixture{
+		{
+			in:   "github.com/sdboyer/gps",
+			root: "github.com/sdboyer/gps",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "github.com/sdboyer/gps/foo",
+			root: "github.com/sdboyer/gps",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			// TODO(sdboyer) is this a problem for enforcing uniqueness? do we
+			// need to collapse these extensions?
+			in:   "github.com/sdboyer/gps.git/foo",
+			root: "github.com/sdboyer/gps.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps.git")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps.git")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps.git")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps.git")},
+			},
+		},
+		{
+			in:   "git@github.com:sdboyer/gps",
+			root: "github.com/sdboyer/gps",
+			mb:   maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+		},
+		{
+			in:   "https://github.com/sdboyer/gps",
+			root: "github.com/sdboyer/gps",
+			mb:   maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+		},
+		{
+			in:   "https://github.com/sdboyer/gps/foo/bar",
+			root: "github.com/sdboyer/gps",
+			mb:   maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+		},
+		// some invalid github username patterns
+		{
+			in:   "github.com/-sdboyer/gps/foo",
+			rerr: errors.New("github.com/-sdboyer/gps/foo is not a valid path for a source on github.com"),
+		},
+		{
+			in:   "github.com/sdboyer-/gps/foo",
+			rerr: errors.New("github.com/sdboyer-/gps/foo is not a valid path for a source on github.com"),
+		},
+		{
+			in:   "github.com/sdbo.yer/gps/foo",
+			rerr: errors.New("github.com/sdbo.yer/gps/foo is not a valid path for a source on github.com"),
+		},
+		{
+			in:   "github.com/sdbo_yer/gps/foo",
+			rerr: errors.New("github.com/sdbo_yer/gps/foo is not a valid path for a source on github.com"),
+		},
+		// Regression - gh does allow two-letter usernames
+		{
+			in:   "github.com/kr/pretty",
+			root: "github.com/kr/pretty",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/kr/pretty")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/kr/pretty")},
+				maybeGitSource{url: mkurl("git://github.com/kr/pretty")},
+				maybeGitSource{url: mkurl("http://github.com/kr/pretty")},
+			},
+		},
+	},
+	"gopkg.in": []pathDeductionFixture{
+		{
+			in:   "gopkg.in/sdboyer/gps.v0",
+			root: "gopkg.in/sdboyer/gps.v0",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "gopkg.in/sdboyer/gps.v0/foo",
+			root: "gopkg.in/sdboyer/gps.v0",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "gopkg.in/sdboyer/gps.v1/foo/bar",
+			root: "gopkg.in/sdboyer/gps.v1",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("git://github.com/sdboyer/gps")},
+				maybeGitSource{url: mkurl("http://github.com/sdboyer/gps")},
+			},
+		},
+		{
+			in:   "gopkg.in/yaml.v1",
+			root: "gopkg.in/yaml.v1",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("git://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("http://github.com/go-yaml/yaml")},
+			},
+		},
+		{
+			in:   "gopkg.in/yaml.v1/foo/bar",
+			root: "gopkg.in/yaml.v1",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("git://github.com/go-yaml/yaml")},
+				maybeGitSource{url: mkurl("http://github.com/go-yaml/yaml")},
+			},
+		},
+		{
+			in:   "gopkg.in/inf.v0",
+			root: "gopkg.in/inf.v0",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://github.com/go-inf/inf")},
+				maybeGitSource{url: mkurl("ssh://git@github.com/go-inf/inf")},
+				maybeGitSource{url: mkurl("git://github.com/go-inf/inf")},
+				maybeGitSource{url: mkurl("http://github.com/go-inf/inf")},
+			},
+		},
+		{
+			// gopkg.in only allows specifying major version in import path
+			in:   "gopkg.in/yaml.v1.2",
+			rerr: errors.New("gopkg.in/yaml.v1.2 is not a valid import path; gopkg.in only allows major versions (\"v1\" instead of \"v1.2\")"),
+		},
+	},
+	"jazz": []pathDeductionFixture{
+		// IBM hub devops services - fixtures borrowed from go get
+		{
+			in:   "hub.jazz.net/git/user1/pkgname",
+			root: "hub.jazz.net/git/user1/pkgname",
+			mb:   maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkgname")},
+		},
+		{
+			in:   "hub.jazz.net/git/user1/pkgname/submodule/submodule/submodule",
+			root: "hub.jazz.net/git/user1/pkgname",
+			mb:   maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkgname")},
+		},
+		{
+			in:   "hub.jazz.net/someotherprefix",
+			rerr: errors.New("hub.jazz.net/someotherprefix is not a valid path for a source on hub.jazz.net"),
+		},
+		{
+			in:   "hub.jazz.net/someotherprefix/user1/packagename",
+			rerr: errors.New("hub.jazz.net/someotherprefix/user1/packagename is not a valid path for a source on hub.jazz.net"),
+		},
+		// Spaces are not valid in user names or package names
+		{
+			in:   "hub.jazz.net/git/User 1/pkgname",
+			rerr: errors.New("hub.jazz.net/git/User 1/pkgname is not a valid path for a source on hub.jazz.net"),
+		},
+		{
+			in:   "hub.jazz.net/git/user1/pkg name",
+			rerr: errors.New("hub.jazz.net/git/user1/pkg name is not a valid path for a source on hub.jazz.net"),
+		},
+		// Dots are not valid in user names
+		{
+			in:   "hub.jazz.net/git/user.1/pkgname",
+			rerr: errors.New("hub.jazz.net/git/user.1/pkgname is not a valid path for a source on hub.jazz.net"),
+		},
+		{
+			in:   "hub.jazz.net/git/user1/pkg.name",
+			root: "hub.jazz.net/git/user1/pkg.name",
+			mb:   maybeGitSource{url: mkurl("https://hub.jazz.net/git/user1/pkg.name")},
+		},
+		// User names cannot have uppercase letters
+		{
+			in:   "hub.jazz.net/git/USER/pkgname",
+			rerr: errors.New("hub.jazz.net/git/USER/pkgname is not a valid path for a source on hub.jazz.net"),
+		},
+	},
+	"bitbucket": []pathDeductionFixture{
+		{
+			in:   "bitbucket.org/sdboyer/reporoot",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+			},
+		},
+		{
+			in:   "bitbucket.org/sdboyer/reporoot/foo/bar",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
+				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot")},
+			},
+		},
+		{
+			in:   "https://bitbucket.org/sdboyer/reporoot/foo/bar",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot")},
+			},
+		},
+		// Less standard behaviors possible due to the hg/git ambiguity
+		{
+			in:   "bitbucket.org/sdboyer/reporoot.git",
+			root: "bitbucket.org/sdboyer/reporoot.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot.git")},
+				maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot.git")},
+				maybeGitSource{url: mkurl("git://bitbucket.org/sdboyer/reporoot.git")},
+				maybeGitSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot.git")},
+			},
+		},
+		{
+			in:   "git@bitbucket.org:sdboyer/reporoot.git",
+			root: "bitbucket.org/sdboyer/reporoot.git",
+			mb:   maybeGitSource{url: mkurl("ssh://git@bitbucket.org/sdboyer/reporoot.git")},
+		},
+		{
+			in:   "bitbucket.org/sdboyer/reporoot.hg",
+			root: "bitbucket.org/sdboyer/reporoot.hg",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://bitbucket.org/sdboyer/reporoot.hg")},
+				maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot.hg")},
+				maybeHgSource{url: mkurl("http://bitbucket.org/sdboyer/reporoot.hg")},
+			},
+		},
+		{
+			in:   "hg@bitbucket.org:sdboyer/reporoot",
+			root: "bitbucket.org/sdboyer/reporoot",
+			mb:   maybeHgSource{url: mkurl("ssh://hg@bitbucket.org/sdboyer/reporoot")},
+		},
+		{
+			in:     "git://bitbucket.org/sdboyer/reporoot.hg",
+			root:   "bitbucket.org/sdboyer/reporoot.hg",
+			srcerr: errors.New("git is not a valid scheme for accessing an hg repository"),
+		},
+	},
+	"launchpad": []pathDeductionFixture{
+		// tests for launchpad, mostly bazaar
+		// TODO(sdboyer) need more tests to deal w/launchpad's oddities
+		{
+			in:   "launchpad.net/govcstestbzrrepo",
+			root: "launchpad.net/govcstestbzrrepo",
+			mb: maybeSources{
+				maybeBzrSource{url: mkurl("https://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr+ssh://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("http://launchpad.net/govcstestbzrrepo")},
+			},
+		},
+		{
+			in:   "launchpad.net/govcstestbzrrepo/foo/bar",
+			root: "launchpad.net/govcstestbzrrepo",
+			mb: maybeSources{
+				maybeBzrSource{url: mkurl("https://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr+ssh://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("bzr://launchpad.net/govcstestbzrrepo")},
+				maybeBzrSource{url: mkurl("http://launchpad.net/govcstestbzrrepo")},
+			},
+		},
+		{
+			in:   "launchpad.net/repo root",
+			rerr: errors.New("launchpad.net/repo root is not a valid path for a source on launchpad.net"),
+		},
+	},
+	"git.launchpad": []pathDeductionFixture{
+		{
+			in:   "git.launchpad.net/reporoot",
+			root: "git.launchpad.net/reporoot",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("git://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("http://git.launchpad.net/reporoot")},
+			},
+		},
+		{
+			in:   "git.launchpad.net/reporoot/foo/bar",
+			root: "git.launchpad.net/reporoot",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("ssh://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("git://git.launchpad.net/reporoot")},
+				maybeGitSource{url: mkurl("http://git.launchpad.net/reporoot")},
+			},
+		},
+		{
+			in:   "git.launchpad.net/repo root",
+			rerr: errors.New("git.launchpad.net/repo root is not a valid path for a source on launchpad.net"),
+		},
+	},
+	"apache": []pathDeductionFixture{
+		{
+			in:   "git.apache.org/package-name.git",
+			root: "git.apache.org/package-name.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("ssh://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("git://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("http://git.apache.org/package-name.git")},
+			},
+		},
+		{
+			in:   "git.apache.org/package-name.git/foo/bar",
+			root: "git.apache.org/package-name.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("ssh://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("git://git.apache.org/package-name.git")},
+				maybeGitSource{url: mkurl("http://git.apache.org/package-name.git")},
+			},
+		},
+	},
+	"vcsext": []pathDeductionFixture{
+		// VCS extension-based syntax
+		{
+			in:   "foobar.com/baz.git",
+			root: "foobar.com/baz.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
+			},
+		},
+		{
+			in:   "foobar.com/baz.git/extra/path",
+			root: "foobar.com/baz.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
+			},
+		},
+		{
+			in:   "foobar.com/baz.bzr",
+			root: "foobar.com/baz.bzr",
+			mb: maybeSources{
+				maybeBzrSource{url: mkurl("https://foobar.com/baz.bzr")},
+				maybeBzrSource{url: mkurl("bzr+ssh://foobar.com/baz.bzr")},
+				maybeBzrSource{url: mkurl("bzr://foobar.com/baz.bzr")},
+				maybeBzrSource{url: mkurl("http://foobar.com/baz.bzr")},
+			},
+		},
+		{
+			in:   "foo-bar.com/baz.hg",
+			root: "foo-bar.com/baz.hg",
+			mb: maybeSources{
+				maybeHgSource{url: mkurl("https://foo-bar.com/baz.hg")},
+				maybeHgSource{url: mkurl("ssh://foo-bar.com/baz.hg")},
+				maybeHgSource{url: mkurl("http://foo-bar.com/baz.hg")},
+			},
+		},
+		{
+			in:   "git@foobar.com:baz.git",
+			root: "foobar.com/baz.git",
+			mb:   maybeGitSource{url: mkurl("ssh://git@foobar.com/baz.git")},
+		},
+		{
+			in:   "bzr+ssh://foobar.com/baz.bzr",
+			root: "foobar.com/baz.bzr",
+			mb:   maybeBzrSource{url: mkurl("bzr+ssh://foobar.com/baz.bzr")},
+		},
+		{
+			in:   "ssh://foobar.com/baz.bzr",
+			root: "foobar.com/baz.bzr",
+			mb:   maybeBzrSource{url: mkurl("ssh://foobar.com/baz.bzr")},
+		},
+		{
+			in:   "https://foobar.com/baz.hg",
+			root: "foobar.com/baz.hg",
+			mb:   maybeHgSource{url: mkurl("https://foobar.com/baz.hg")},
+		},
+		{
+			in:     "git://foobar.com/baz.hg",
+			root:   "foobar.com/baz.hg",
+			srcerr: errors.New("git is not a valid scheme for accessing hg repositories (path foobar.com/baz.hg)"),
+		},
+		// who knows why anyone would do this, but having a second vcs ext
+		// shouldn't throw us off - only the first one counts
+		{
+			in:   "foobar.com/baz.git/quark/quizzle.bzr/quorum",
+			root: "foobar.com/baz.git",
+			mb: maybeSources{
+				maybeGitSource{url: mkurl("https://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("ssh://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("git://foobar.com/baz.git")},
+				maybeGitSource{url: mkurl("http://foobar.com/baz.git")},
+			},
+		},
+	},
+	"vanity": []pathDeductionFixture{
+		// Vanity imports
+		{
+			in:   "golang.org/x/exp",
+			root: "golang.org/x/exp",
+			mb:   maybeGitSource{url: mkurl("https://go.googlesource.com/exp")},
+		},
+		{
+			in:   "golang.org/x/exp/inotify",
+			root: "golang.org/x/exp",
+			mb:   maybeGitSource{url: mkurl("https://go.googlesource.com/exp")},
+		},
+		{
+			in:   "rsc.io/pdf",
+			root: "rsc.io/pdf",
+			mb:   maybeGitSource{url: mkurl("https://github.com/rsc/pdf")},
+		},
+	},
+}
+
// TestDeduceFromPath table-drives each host-specific path deducer against
// its fixture group, checking both root deduction and source deduction
// (errors are compared by message; sources via reflect.DeepEqual).
func TestDeduceFromPath(t *testing.T) {
	for typ, fixtures := range pathDeductionFixtures {
		// Select the deducer under test for this fixture group.
		var deducer pathDeducer
		switch typ {
		case "github":
			deducer = githubDeducer{regexp: ghRegex}
		case "gopkg.in":
			deducer = gopkginDeducer{regexp: gpinNewRegex}
		case "jazz":
			deducer = jazzDeducer{regexp: jazzRegex}
		case "bitbucket":
			deducer = bitbucketDeducer{regexp: bbRegex}
		case "launchpad":
			deducer = launchpadDeducer{regexp: lpRegex}
		case "git.launchpad":
			deducer = launchpadGitDeducer{regexp: glpRegex}
		case "apache":
			deducer = apacheDeducer{regexp: apacheRegex}
		case "vcsext":
			deducer = vcsExtensionDeducer{regexp: vcsExtensionRegex}
		default:
			// Should just be the vanity imports, which we do elsewhere
			continue
		}

		// printmb renders a maybeSource (recursing into maybeSources) for
		// readable failure messages.
		var printmb func(mb maybeSource) string
		printmb = func(mb maybeSource) string {
			switch tmb := mb.(type) {
			case maybeSources:
				var buf bytes.Buffer
				fmt.Fprintf(&buf, "%v maybeSources:", len(tmb))
				for _, elem := range tmb {
					fmt.Fprintf(&buf, "\n\t\t%s", printmb(elem))
				}
				return buf.String()
			case maybeGitSource:
				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
			case maybeBzrSource:
				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
			case maybeHgSource:
				return fmt.Sprintf("%T: %s", tmb, ufmt(tmb.url))
			default:
				t.Errorf("Unknown maybeSource type: %T", mb)
				t.FailNow()
			}
			return ""
		}

		for _, fix := range fixtures {
			// Normalize the fixture input the same way deduceFromPath would.
			u, in, uerr := normalizeURI(fix.in)
			if uerr != nil {
				if fix.rerr == nil {
					t.Errorf("(in: %s) bad input URI %s", fix.in, uerr)
				}
				continue
			}

			// Root deduction: errors compared by message, roots by value.
			root, rerr := deducer.deduceRoot(in)
			if fix.rerr != nil {
				if rerr == nil {
					t.Errorf("(in: %s, %T) Expected error on deducing root, got none:\n\t(WNT) %s", in, deducer, fix.rerr)
				} else if fix.rerr.Error() != rerr.Error() {
					t.Errorf("(in: %s, %T) Got unexpected error on deducing root:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, rerr, fix.rerr)
				}
			} else if rerr != nil {
				t.Errorf("(in: %s, %T) Got unexpected error on deducing root:\n\t(GOT) %s", in, deducer, rerr)
			} else if root != fix.root {
				t.Errorf("(in: %s, %T) Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, root, fix.root)
			}

			// Source deduction: same pattern, comparing candidate sets.
			mb, mberr := deducer.deduceSource(in, u)
			if fix.srcerr != nil {
				if mberr == nil {
					t.Errorf("(in: %s, %T) Expected error on deducing source, got none:\n\t(WNT) %s", in, deducer, fix.srcerr)
				} else if fix.srcerr.Error() != mberr.Error() {
					t.Errorf("(in: %s, %T) Got unexpected error on deducing source:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, mberr, fix.srcerr)
				}
			} else if mberr != nil {
				// don't complain the fix already expected an rerr
				if fix.rerr == nil {
					t.Errorf("(in: %s, %T) Got unexpected error on deducing source:\n\t(GOT) %s", in, deducer, mberr)
				}
			} else if !reflect.DeepEqual(mb, fix.mb) {
				if mb == nil {
					t.Errorf("(in: %s, %T) Deducer returned source maybes, but none expected:\n\t(GOT) (none)\n\t(WNT) %s", in, deducer, printmb(fix.mb))
				} else if fix.mb == nil {
					t.Errorf("(in: %s, %T) Deducer returned source maybes, but none expected:\n\t(GOT) %s\n\t(WNT) (none)", in, deducer, printmb(mb))
				} else {
					t.Errorf("(in: %s, %T) Deducer did not return expected source:\n\t(GOT) %s\n\t(WNT) %s", in, deducer, printmb(mb), printmb(fix.mb))
				}
			}
		}
	}
}
+
// TestVanityDeduction exercises full vanity-import resolution (which hits
// the network to fetch go-get metadata) for the "vanity" fixtures, running
// all fixtures concurrently. Skipped under -short because of the network
// dependency.
func TestVanityDeduction(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping slow test in short mode")
	}

	sm, clean := mkNaiveSM(t)
	defer clean()

	vanities := pathDeductionFixtures["vanity"]
	wg := &sync.WaitGroup{}
	wg.Add(len(vanities))

	for _, fix := range vanities {
		// fix is passed as a parameter so each goroutine gets its own copy
		// rather than sharing the loop variable.
		go func(fix pathDeductionFixture) {
			defer wg.Done()
			pr, err := sm.DeduceProjectRoot(fix.in)
			if err != nil {
				t.Errorf("(in: %s) Unexpected err on deducing project root: %s", fix.in, err)
				return
			} else if string(pr) != fix.root {
				t.Errorf("(in: %s) Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", fix.in, pr, fix.root)
			}

			_, srcf, err := sm.deducePathAndProcess(fix.in)
			if err != nil {
				t.Errorf("(in: %s) Unexpected err on deducing source: %s", fix.in, err)
				return
			}

			// Executing the source future triggers the actual network fetch.
			_, ident, err := srcf()
			if err != nil {
				t.Errorf("(in: %s) Unexpected err on executing source future: %s", fix.in, err)
				return
			}

			// Vanity fixtures always carry a single maybeGitSource.
			ustr := fix.mb.(maybeGitSource).url.String()
			if ident != ustr {
				t.Errorf("(in: %s) Deduced repo ident does not match fixture:\n\t(GOT) %s\n\t(WNT) %s", fix.in, ident, ustr)
			}
		}(fix)
	}

	wg.Wait()
}
+
// ufmt renders a *url.URL field by field, which is far more useful in test
// failure output than fmt's default struct printer. (Borrowed from the
// stdlib.)
func ufmt(u *url.URL) string {
	var user, pass interface{}
	if ui := u.User; ui != nil {
		user = ui.Username()
		if pw, ok := ui.Password(); ok {
			pass = pw
		}
	}
	return fmt.Sprintf("host=%q, path=%q, opaque=%q, scheme=%q, user=%#v, pass=%#v, rawpath=%q, rawq=%q, frag=%q",
		u.Host, u.Path, u.Opaque, u.Scheme, user, pass, u.RawPath, u.RawQuery, u.Fragment)
}
diff --git a/vendor/github.com/sdboyer/gps/discovery.go b/vendor/github.com/sdboyer/gps/discovery.go
new file mode 100644
index 0000000..8da4a66
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/discovery.go
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gps
+
+// This code is taken from cmd/go/discovery.go; it is the logic go get itself
+// uses to interpret meta imports information.
+
+import (
+	"encoding/xml"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error which is printed by go get, so the user can find why the package
+// wasn't downloaded if the encoding is not supported. Note that, in
+// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
+// greater than 0x7f are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+	switch strings.ToLower(charset) {
+	case "ascii":
+		return input, nil
+	default:
+		return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+	}
+}
+
// metaImport represents one parsed <meta name="go-import"> tag: the import
// path prefix it covers, the VCS type, and the repository root URL (the
// three whitespace-separated fields of the tag's content attribute).
type metaImport struct {
	Prefix, VCS, RepoRoot string
}
+
+// parseMetaGoImports returns meta imports from the HTML in r.
+// Parsing ends at the end of the <head> section or the beginning of the <body>.
+func parseMetaGoImports(r io.Reader) (imports []metaImport, err error) {
+	d := xml.NewDecoder(r)
+	d.CharsetReader = charsetReader
+	d.Strict = false
+	var t xml.Token
+	for {
+		t, err = d.RawToken()
+		if err != nil {
+			if err == io.EOF || len(imports) > 0 {
+				err = nil
+			}
+			return
+		}
+		if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+			return
+		}
+		if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+			return
+		}
+		e, ok := t.(xml.StartElement)
+		if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+			continue
+		}
+		if attrValue(e.Attr, "name") != "go-import" {
+			continue
+		}
+		if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 {
+			imports = append(imports, metaImport{
+				Prefix:   f[0],
+				VCS:      f[1],
+				RepoRoot: f[2],
+			})
+		}
+	}
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// `name', or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+	for _, a := range attrs {
+		if strings.EqualFold(a.Name.Local, name) {
+			return a.Value
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/sdboyer/gps/example.go b/vendor/github.com/sdboyer/gps/example.go
new file mode 100644
index 0000000..728439f
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/example.go
@@ -0,0 +1,68 @@
+// +build ignore
+
+package main
+
+import (
+	"go/build"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/Masterminds/semver"
+	"github.com/sdboyer/gps"
+)
+
+// This is probably the simplest possible implementation of gps. It does the
+// substantive work that `go get` does, except:
+//  1. It drops the resulting tree into vendor instead of GOPATH
+//  2. It prefers semver tags (if available) over branches
+//  3. It removes any vendor directories nested within dependencies
+//
+//  This will compile and work...and then blow away any vendor directory present
+//  in the cwd. Be careful!
func main() {
	// Operate on the current directory
	root, _ := os.Getwd()
	// Assume the current directory is correctly placed on a GOPATH, and derive
	// the ProjectRoot from it
	srcprefix := filepath.Join(build.Default.GOPATH, "src") + string(filepath.Separator)
	importroot := filepath.ToSlash(strings.TrimPrefix(root, srcprefix))

	// Set up params, including tracing
	params := gps.SolveParameters{
		RootDir:     root,
		Trace:       true,
		TraceLogger: log.New(os.Stdout, "", 0),
	}
	// Analyze the import graph under root. Errors are ignored throughout
	// this example for brevity; real tools must check them.
	params.RootPackageTree, _ = gps.ListPackages(root, importroot)

	// Set up a SourceManager with the NaiveAnalyzer
	sourcemgr, _ := gps.NewSourceManager(NaiveAnalyzer{}, ".repocache")
	defer sourcemgr.Release()

	// Prep and run the solver
	solver, _ := gps.Prepare(params, sourcemgr)
	solution, err := solver.Solve()
	if err == nil {
		// If no failure, blow away the vendor dir and write a new one out,
		// stripping nested vendor directories as we go.
		os.RemoveAll(filepath.Join(root, "vendor"))
		gps.WriteDepTree(filepath.Join(root, "vendor"), solution, sourcemgr, true)
	}
}
+
// NaiveAnalyzer is a do-nothing analyzer: it reports no manifest or lock
// data for any project, leaving every dependency unconstrained.
type NaiveAnalyzer struct{}

// DeriveManifestAndLock gets called when the solver needs manifest/lock data
// for a particular project (the gps.ProjectRoot parameter) at a particular
// version. That version will be checked out in a directory rooted at path.
//
// Returning all nils declares that no constraint data exists.
func (a NaiveAnalyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) {
	return nil, nil, nil
}

// Info reports the name and version of the analyzer. This is mostly irrelevant.
func (a NaiveAnalyzer) Info() (name string, version *semver.Version) {
	v, _ := semver.NewVersion("v0.0.1")
	return "example-analyzer", v
}
diff --git a/vendor/github.com/sdboyer/gps/flags.go b/vendor/github.com/sdboyer/gps/flags.go
new file mode 100644
index 0000000..d9a3a1d
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/flags.go
@@ -0,0 +1,37 @@
+package gps
+
// sourceExistence values represent the extent to which a project "exists."
// The values are bit flags and may be OR'd together.
type sourceExistence uint8

const (
	// existsInVendorRoot indicates that a project exists in a vendor directory
	// at the predictable location based on import path. It does NOT imply, much
	// less guarantee, any of the following:
	//   - That the code at the expected location under vendor is at the version
	//   given in a lock file
	//   - That the code at the expected location under vendor is from the
	//   expected upstream project at all
	//   - That, if this flag is not present, the project does not exist at some
	//   unexpected/nested location under vendor
	//   - That the full repository history is available. In fact, the
	//   assumption should be that if only this flag is on, the full repository
	//   history is likely not available (locally)
	//
	// In short, the information encoded in this flag should not be construed as
	// exhaustive.
	existsInVendorRoot sourceExistence = 1 << iota

	// existsInCache indicates that a project exists on-disk in the local cache.
	// It does not guarantee that an upstream exists, thus it cannot imply
	// that the cache is at all correct - up-to-date, or even of the expected
	// upstream project repository.
	//
	// Additionally, this refers only to the existence of the local repository
	// itself; it says nothing about the existence or completeness of the
	// separate metadata cache.
	existsInCache

	// existsUpstream indicates that a project repository was locatable at the
	// path provided by a project's URI (a base import path).
	existsUpstream
)
diff --git a/vendor/github.com/sdboyer/gps/glide.lock b/vendor/github.com/sdboyer/gps/glide.lock
new file mode 100644
index 0000000..ea36f4b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/glide.lock
@@ -0,0 +1,19 @@
+hash: 2252a285ab27944a4d7adcba8dbd03980f59ba652f12db39fa93b927c345593e
+updated: 2016-06-06T22:10:37.696580463-04:00
+imports:
+- name: github.com/armon/go-radix
+  version: 4239b77079c7b5d1243b7b4736304ce8ddb6f0f2
+- name: github.com/hashicorp/go-immutable-radix
+  version: 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
+- name: github.com/hashicorp/golang-lru
+  version: a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+- name: github.com/Masterminds/semver
+  version: 0a2c9fc0eee2c4cbb9526877c4a54da047fdcadd
+  vcs: git
+- name: github.com/Masterminds/vcs
+  version: 7a21de0acff824ccf45f633cc844a19625149c2f
+  vcs: git
+- name: github.com/termie/go-shutil
+  version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
+  vcs: git
+devImports: []
diff --git a/vendor/github.com/sdboyer/gps/glide.yaml b/vendor/github.com/sdboyer/gps/glide.yaml
new file mode 100644
index 0000000..5e379fa
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/glide.yaml
@@ -0,0 +1,9 @@
+package: github.com/sdboyer/gps
+owners:
+- name: Sam Boyer
+  email: tech@samboyer.org
+dependencies:
+- package: github.com/Masterminds/semver
+  branch: 2.x
+- package: github.com/termie/go-shutil
+  version: bcacb06fecaeec8dc42af03c87c6949f4a05c74c
diff --git a/vendor/github.com/sdboyer/gps/hash.go b/vendor/github.com/sdboyer/gps/hash.go
new file mode 100644
index 0000000..acede5c
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/hash.go
@@ -0,0 +1,92 @@
+package gps
+
+import (
+	"crypto/sha256"
+	"sort"
+)
+
// HashInputs computes a hash digest of all data in SolveParams and the
// RootManifest that act as function inputs to Solve().
//
// The digest returned from this function is the same as the digest that would
// be included with a Solve() Result. As such, it's appropriate for comparison
// against the digest stored in a lock file, generated by a previous Solve(): if
// the digests match, then manifest and lock are in sync, and a Solve() is
// unnecessary.
//
// (Basically, this is for memoization.)
//
// NOTE: the write order below is part of the contract - reordering any of
// these writes changes the digest and invalidates existing lock files.
func (s *solver) HashInputs() []byte {
	c, tc := s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()
	// Apply overrides to the constraints from the root. Otherwise, the hash
	// would be computed on the basis of a constraint from root that doesn't
	// actually affect solving.
	p := s.ovr.overrideAll(pcSliceToMap(c, tc).asSortedSlice())

	// We have everything we need; now, compute the hash.
	h := sha256.New()
	for _, pd := range p {
		h.Write([]byte(pd.Ident.ProjectRoot))
		h.Write([]byte(pd.Ident.NetworkName))
		// FIXME Constraint.String() is a surjective-only transformation - tags
		// and branches with the same name are written out as the same string.
		// This could, albeit rarely, result in input collisions when a real
		// change has occurred.
		h.Write([]byte(pd.Constraint.String()))
	}

	// The stdlib and old appengine packages play the same functional role in
	// solving as ignores. Because they change, albeit quite infrequently, we
	// have to include them in the hash.
	h.Write([]byte(stdlibPkgs))
	h.Write([]byte(appenginePkgs))

	// Write each of the packages, or the errors that were found for a
	// particular subpath, into the hash.
	for _, perr := range s.rpt.Packages {
		if perr.Err != nil {
			h.Write([]byte(perr.Err.Error()))
		} else {
			h.Write([]byte(perr.P.Name))
			h.Write([]byte(perr.P.CommentPath))
			h.Write([]byte(perr.P.ImportPath))
			for _, imp := range perr.P.Imports {
				h.Write([]byte(imp))
			}
			for _, imp := range perr.P.TestImports {
				h.Write([]byte(imp))
			}
		}
	}

	// Add the package ignores, if any.
	if len(s.ig) > 0 {
		// Dump and sort the ignores so the hash is deterministic regardless
		// of map iteration order.
		ig := make([]string, len(s.ig))
		k := 0
		for pkg := range s.ig {
			ig[k] = pkg
			k++
		}
		sort.Strings(ig)

		for _, igp := range ig {
			h.Write([]byte(igp))
		}
	}

	// Overrides are themselves a solve input even when they don't intersect
	// the root's own constraints, so hash the full (sorted) override set.
	for _, pc := range s.ovr.asSortedSlice() {
		h.Write([]byte(pc.Ident.ProjectRoot))
		if pc.Ident.NetworkName != "" {
			h.Write([]byte(pc.Ident.NetworkName))
		}
		if pc.Constraint != nil {
			h.Write([]byte(pc.Constraint.String()))
		}
	}

	// A different analyzer (or version) may derive different manifests from
	// the same code, so its identity is an input too.
	an, av := s.b.AnalyzerInfo()
	h.Write([]byte(an))
	h.Write([]byte(av.String()))

	return h.Sum(nil)
}
diff --git a/vendor/github.com/sdboyer/gps/hash_test.go b/vendor/github.com/sdboyer/gps/hash_test.go
new file mode 100644
index 0000000..a257252
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/hash_test.go
@@ -0,0 +1,350 @@
+package gps
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"testing"
+)
+
+func TestHashInputs(t *testing.T) {
+	fix := basicFixtures["shared dependency with overlapping constraints"]
+
+	params := SolveParameters{
+		RootDir:         string(fix.ds[0].n),
+		RootPackageTree: fix.rootTree(),
+		Manifest:        fix.rootmanifest(),
+	}
+
+	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
+	if err != nil {
+		t.Errorf("Unexpected error while prepping solver: %s", err)
+		t.FailNow()
+	}
+
+	dig := s.HashInputs()
+	h := sha256.New()
+
+	elems := []string{
+		"a",
+		"1.0.0",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"root",
+		"a",
+		"b",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct := h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+}
+
+func TestHashInputsIgnores(t *testing.T) {
+	fix := basicFixtures["shared dependency with overlapping constraints"]
+
+	rm := fix.rootmanifest().(simpleRootManifest).dup()
+	rm.ig = map[string]bool{
+		"foo": true,
+		"bar": true,
+	}
+
+	params := SolveParameters{
+		RootDir:         string(fix.ds[0].n),
+		RootPackageTree: fix.rootTree(),
+		Manifest:        rm,
+	}
+
+	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
+	if err != nil {
+		t.Errorf("Unexpected error while prepping solver: %s", err)
+		t.FailNow()
+	}
+
+	dig := s.HashInputs()
+	h := sha256.New()
+
+	elems := []string{
+		"a",
+		"1.0.0",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"bar",
+		"foo",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct := h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+}
+
+func TestHashInputsOverrides(t *testing.T) {
+	fix := basicFixtures["shared dependency with overlapping constraints"]
+
+	rm := fix.rootmanifest().(simpleRootManifest).dup()
+	// First case - override something not in the root, just with network name
+	rm.ovr = map[ProjectRoot]ProjectProperties{
+		"c": ProjectProperties{
+			NetworkName: "car",
+		},
+	}
+	params := SolveParameters{
+		RootDir:         string(fix.ds[0].n),
+		RootPackageTree: fix.rootTree(),
+		Manifest:        rm,
+	}
+
+	s, err := Prepare(params, newdepspecSM(fix.ds, nil))
+	if err != nil {
+		t.Errorf("Unexpected error while prepping solver: %s", err)
+		t.FailNow()
+	}
+
+	dig := s.HashInputs()
+	h := sha256.New()
+
+	elems := []string{
+		"a",
+		"1.0.0",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"c",
+		"car",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct := h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override not in root, just with constraint
+	rm.ovr["d"] = ProjectProperties{
+		Constraint: NewBranch("foobranch"),
+	}
+	dig = s.HashInputs()
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"1.0.0",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override not in root, both constraint and network name
+	rm.ovr["e"] = ProjectProperties{
+		NetworkName: "groucho",
+		Constraint:  NewBranch("plexiglass"),
+	}
+	dig = s.HashInputs()
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"1.0.0",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override in root, just constraint
+	rm.ovr["a"] = ProjectProperties{
+		Constraint: NewVersion("fluglehorn"),
+	}
+	dig = s.HashInputs()
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"fluglehorn",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"a",
+		"fluglehorn",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override in root, only network name
+	rm.ovr["a"] = ProjectProperties{
+		NetworkName: "nota",
+	}
+	dig = s.HashInputs()
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"nota",
+		"1.0.0",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"a",
+		"nota",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+
+	// Override in root, network name and constraint
+	rm.ovr["a"] = ProjectProperties{
+		NetworkName: "nota",
+		Constraint:  NewVersion("fluglehorn"),
+	}
+	dig = s.HashInputs()
+	h = sha256.New()
+
+	elems = []string{
+		"a",
+		"nota",
+		"fluglehorn",
+		"b",
+		"1.0.0",
+		stdlibPkgs,
+		appenginePkgs,
+		"root",
+		"",
+		"root",
+		"a",
+		"b",
+		"a",
+		"nota",
+		"fluglehorn",
+		"c",
+		"car",
+		"d",
+		"foobranch",
+		"e",
+		"groucho",
+		"plexiglass",
+		"depspec-sm-builtin",
+		"1.0.0",
+	}
+	for _, v := range elems {
+		h.Write([]byte(v))
+	}
+	correct = h.Sum(nil)
+
+	if !bytes.Equal(dig, correct) {
+		t.Errorf("Hashes are not equal")
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/header.png b/vendor/github.com/sdboyer/gps/header.png
new file mode 100644
index 0000000..d39bed6
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/header.png
Binary files differ
diff --git a/vendor/github.com/sdboyer/gps/import_mode_go15.go b/vendor/github.com/sdboyer/gps/import_mode_go15.go
new file mode 100644
index 0000000..5ef11c2
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/import_mode_go15.go
@@ -0,0 +1,13 @@
+// +build !go1.6
+
+package gps
+
+import "go/build"
+
// analysisImportMode returns the import mode used for build.Import() calls for
// standard package analysis.
//
// build.IgnoreVendor was added in go1.6 (the original comment called it
// "NoVendor", which does not exist), so we have to omit it here.
func analysisImportMode() build.ImportMode {
	return build.ImportComment
}
diff --git a/vendor/github.com/sdboyer/gps/import_mode_go16.go b/vendor/github.com/sdboyer/gps/import_mode_go16.go
new file mode 100644
index 0000000..edb534a
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/import_mode_go16.go
@@ -0,0 +1,11 @@
+// +build go1.6
+
+package gps
+
+import "go/build"
+
// analysisImportMode returns the import mode used for build.Import() calls for
// standard package analysis.
//
// build.IgnoreVendor (added in go1.6) keeps go/build from resolving imports
// through vendor directories during analysis.
func analysisImportMode() build.ImportMode {
	return build.ImportComment | build.IgnoreVendor
}
diff --git a/vendor/github.com/sdboyer/gps/lock.go b/vendor/github.com/sdboyer/gps/lock.go
new file mode 100644
index 0000000..729d501
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/lock.go
@@ -0,0 +1,170 @@
+package gps
+
+import "sort"
+
// Lock represents data from a lock file (or however the implementing tool
// chooses to store it) at a particular version that is relevant to the
// satisfiability solving process.
//
// In general, the information produced by gps on finding a successful
// solution is all that would be necessary to constitute a lock file, though
// tools can include whatever other information they want in their storage.
type Lock interface {
	// SolverVersion would indicate the version of the solver used to generate
	// this lock data; currently unused.
	//SolverVersion() string

	// InputHash returns the hash of inputs to gps that resulted in this lock
	// data.
	InputHash() []byte

	// Projects returns the list of LockedProjects contained in the lock data.
	Projects() []LockedProject
}
+
// LockedProject is a single project entry from a lock file. It expresses the
// project's name, one or both of version and underlying revision, the network
// URI for accessing it, the path at which it should be placed within a vendor
// directory, and the packages that are used in it.
type LockedProject struct {
	pi   ProjectIdentifier // root name plus network location
	v    UnpairedVersion   // branch/semver/plain version, if known
	r    Revision          // underlying immutable revision, if known
	pkgs []string          // packages in use within the project
}

// SimpleLock is a helper for tools to easily describe lock data when they know
// that no hash, or other complex information, is available.
type SimpleLock []LockedProject

// Compile-time check that SimpleLock satisfies Lock.
var _ Lock = SimpleLock{}

// InputHash always returns nil for SimpleLock. This makes it useless
// as a stable lock to be written to disk, but still useful for some ephemeral
// purposes.
func (SimpleLock) InputHash() []byte {
	return nil
}

// Projects returns the entire contents of the SimpleLock.
func (l SimpleLock) Projects() []LockedProject {
	return l
}
+
+// NewLockedProject creates a new LockedProject struct with a given name,
+// version, and upstream repository URL.
+//
+// Note that passing a nil version will cause a panic. This is a correctness
+// measure to ensure that the solver is never exposed to a version-less lock
+// entry. Such a case would be meaningless - the solver would have no choice but
+// to simply dismiss that project. By creating a hard failure case via panic
+// instead, we are trying to avoid inflicting the resulting pain on the user by
+// instead forcing a decision on the Analyzer implementation.
+func NewLockedProject(id ProjectIdentifier, v Version, pkgs []string) LockedProject {
+	if v == nil {
+		panic("must provide a non-nil version to create a LockedProject")
+	}
+
+	lp := LockedProject{
+		pi:   id,
+		pkgs: pkgs,
+	}
+
+	switch tv := v.(type) {
+	case Revision:
+		lp.r = tv
+	case branchVersion:
+		lp.v = tv
+	case semVersion:
+		lp.v = tv
+	case plainVersion:
+		lp.v = tv
+	case versionPair:
+		lp.r = tv.r
+		lp.v = tv.v
+	}
+
+	return lp
+}
+
+// Ident returns the identifier describing the project. This includes both the
+// local name (the root name by which the project is referenced in import paths)
+// and the network name, where the upstream source lives.
+func (lp LockedProject) Ident() ProjectIdentifier {
+	return lp.pi
+}
+
+// Version assembles together whatever version and/or revision data is
+// available into a single Version.
+func (lp LockedProject) Version() Version {
+	if lp.r == "" {
+		return lp.v
+	}
+
+	if lp.v == nil {
+		return lp.r
+	}
+
+	return lp.v.Is(lp.r)
+}
+
+func (lp LockedProject) toAtom() atom {
+	pa := atom{
+		id: lp.Ident(),
+	}
+
+	if lp.v == nil {
+		pa.v = lp.r
+	} else if lp.r != "" {
+		pa.v = lp.v.Is(lp.r)
+	} else {
+		pa.v = lp.v
+	}
+
+	return pa
+}
+
// safeLock is the solver's own immutable copy of caller-provided lock data;
// see prepLock.
type safeLock struct {
	h []byte          // input hash
	p []LockedProject // copied project list
}

// InputHash returns the stored input digest.
func (sl safeLock) InputHash() []byte {
	return sl.h
}

// Projects returns the stored list of locked projects.
func (sl safeLock) Projects() []LockedProject {
	return sl.p
}
+
+// prepLock ensures a lock is prepared and safe for use by the solver. This is
+// mostly about defensively ensuring that no outside routine can modify the lock
+// while the solver is in-flight.
+//
+// This is achieved by copying the lock's data into a new safeLock.
+func prepLock(l Lock) Lock {
+	pl := l.Projects()
+
+	rl := safeLock{h: l.InputHash()}
+	copy(rl.p, pl)
+
+	return rl
+}
+
// SortLockedProjects sorts a slice of LockedProject in alphabetical order by
// ProjectRoot. The sort is stable and in place.
func SortLockedProjects(lps []LockedProject) {
	sort.Stable(lpsorter(lps))
}

// lpsorter adapts []LockedProject to sort.Interface. (sort.Slice is not
// available on the go1.5/1.6 toolchains this package supports.)
type lpsorter []LockedProject

// Swap exchanges the elements at i and j.
func (lps lpsorter) Swap(i, j int) {
	lps[i], lps[j] = lps[j], lps[i]
}

// Len reports the number of elements.
func (lps lpsorter) Len() int {
	return len(lps)
}

// Less orders lexically by ProjectRoot.
func (lps lpsorter) Less(i, j int) bool {
	return lps[i].pi.ProjectRoot < lps[j].pi.ProjectRoot
}
diff --git a/vendor/github.com/sdboyer/gps/lock_test.go b/vendor/github.com/sdboyer/gps/lock_test.go
new file mode 100644
index 0000000..b580502
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/lock_test.go
@@ -0,0 +1,26 @@
+package gps
+
+import (
+	"reflect"
+	"testing"
+)
+
// TestLockedProjectSorting verifies that SortLockedProjects orders entries
// alphabetically by project root without touching anything else.
func TestLockedProjectSorting(t *testing.T) {
	// version doesn't matter here
	lps := []LockedProject{
		NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), nil),
		NewLockedProject(mkPI("foo"), NewVersion("nada"), nil),
		NewLockedProject(mkPI("bar"), NewVersion("zip"), nil),
		NewLockedProject(mkPI("qux"), NewVersion("zilch"), nil),
	}
	lps2 := make([]LockedProject, len(lps))
	copy(lps2, lps)

	SortLockedProjects(lps2)

	// only the two should have switched positions: sorted order is
	// bar, foo, github.com/sdboyer/gps, qux.
	lps[0], lps[2] = lps[2], lps[0]
	if !reflect.DeepEqual(lps, lps2) {
		t.Errorf("SortLockedProject did not sort as expected:\n\t(GOT) %s\n\t(WNT) %s", lps2, lps)
	}
}
diff --git a/vendor/github.com/sdboyer/gps/manager_test.go b/vendor/github.com/sdboyer/gps/manager_test.go
new file mode 100644
index 0000000..0daaef9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/manager_test.go
@@ -0,0 +1,565 @@
+package gps
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"runtime"
+	"sync"
+	"testing"
+
+	"github.com/Masterminds/semver"
+)
+
// bd holds the directory of this test file; assigned in init below.
var bd string

// An analyzer that passes nothing back, but doesn't error. This is the naive
// case - no constraints, no lock, and no errors. The SourceMgr will interpret
// this as open/Any constraints on everything in the import graph.
type naiveAnalyzer struct{}

// DeriveManifestAndLock returns all nils, declaring no constraint or lock
// data for any project.
func (naiveAnalyzer) DeriveManifestAndLock(string, ProjectRoot) (Manifest, Lock, error) {
	return nil, nil, nil
}

// Info reports a fixed analyzer name and version.
func (a naiveAnalyzer) Info() (name string, version *semver.Version) {
	return "naive-analyzer", sv("v0.0.1")
}
+
+func sv(s string) *semver.Version {
+	sv, err := semver.NewVersion(s)
+	if err != nil {
+		panic(fmt.Sprintf("Error creating semver from %q: %s", s, err))
+	}
+
+	return sv
+}
+
+func mkNaiveSM(t *testing.T) (*SourceMgr, func()) {
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+		t.FailNow()
+	}
+
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath)
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+		t.FailNow()
+	}
+
+	return sm, func() {
+		sm.Release()
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+}
+
func init() {
	// Record the directory containing the test source so fixtures can be
	// located on disk. NOTE(review): runtime.Caller(1) inspects the
	// caller's frame, not this function's (that would be Caller(0));
	// confirm this resolves to the intended file when run under `go test`.
	_, filename, _, _ := runtime.Caller(1)
	bd = path.Dir(filename)
}
+
+func TestSourceManagerInit(t *testing.T) {
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath)
+
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+	}
+
+	_, err = NewSourceManager(naiveAnalyzer{}, cpath)
+	if err == nil {
+		t.Errorf("Creating second SourceManager should have failed due to file lock contention")
+	} else if te, ok := err.(CouldNotCreateLockError); !ok {
+		t.Errorf("Should have gotten CouldNotCreateLockError error type, but got %T", te)
+	}
+
+	if _, err = os.Stat(path.Join(cpath, "sm.lock")); err != nil {
+		t.Errorf("Global cache lock file not created correctly")
+	}
+
+	sm.Release()
+	err = removeAll(cpath)
+	if err != nil {
+		t.Errorf("removeAll failed: %s", err)
+	}
+
+	if _, err = os.Stat(path.Join(cpath, "sm.lock")); !os.IsNotExist(err) {
+		t.Errorf("Global cache lock file not cleared correctly on Release()")
+		t.FailNow()
+	}
+
+	// Set another one up at the same spot now, just to be sure
+	sm, err = NewSourceManager(naiveAnalyzer{}, cpath)
+	if err != nil {
+		t.Errorf("Creating a second SourceManager should have succeeded when the first was released, but failed with err %s", err)
+	}
+
+	sm.Release()
+	err = removeAll(cpath)
+	if err != nil {
+		t.Errorf("removeAll failed: %s", err)
+	}
+}
+
+func TestSourceInit(t *testing.T) {
+	// This test is a bit slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping project manager init test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+		t.FailNow()
+	}
+
+	sm, err := NewSourceManager(naiveAnalyzer{}, cpath)
+	if err != nil {
+		t.Errorf("Unexpected error on SourceManager creation: %s", err)
+		t.FailNow()
+	}
+
+	defer func() {
+		sm.Release()
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}()
+
+	id := mkPI("github.com/Masterminds/VCSTestRepo").normalize()
+	v, err := sm.ListVersions(id)
+	if err != nil {
+		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
+	}
+
+	if len(v) != 3 {
+		t.Errorf("Expected three version results from the test repo, got %v", len(v))
+	} else {
+		rev := Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
+		expected := []Version{
+			NewVersion("1.0.0").Is(rev),
+			NewBranch("master").Is(rev),
+			NewBranch("test").Is(rev),
+		}
+
+		// SourceManager itself doesn't guarantee ordering; sort them here so we
+		// can dependably check output
+		SortForUpgrade(v)
+
+		for k, e := range expected {
+			if !v[k].Matches(e) {
+				t.Errorf("Expected version %s in position %v but got %s", e, k, v[k])
+			}
+		}
+
+		if !v[1].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected master branch version to have isDefault flag, but it did not")
+		}
+		if v[2].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected test branch version not to have isDefault flag, but it did")
+		}
+	}
+
+	// Two birds, one stone - make sure the internal ProjectManager vlist cache
+	// works (or at least doesn't not work) by asking for the versions again,
+	// and do it through smcache to ensure its sorting works, as well.
+	smc := &bridge{
+		sm:     sm,
+		vlists: make(map[ProjectIdentifier][]Version),
+		s:      &solver{},
+	}
+
+	v, err = smc.ListVersions(id)
+	if err != nil {
+		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
+	}
+
+	rev := Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")
+	if len(v) != 3 {
+		t.Errorf("Expected three version results from the test repo, got %v", len(v))
+	} else {
+		expected := []Version{
+			NewVersion("1.0.0").Is(rev),
+			NewBranch("master").Is(rev),
+			NewBranch("test").Is(rev),
+		}
+
+		for k, e := range expected {
+			if !v[k].Matches(e) {
+				t.Errorf("Expected version %s in position %v but got %s", e, k, v[k])
+			}
+		}
+
+		if !v[1].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected master branch version to have isDefault flag, but it did not")
+		}
+		if v[2].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected test branch version not to have isDefault flag, but it did")
+		}
+	}
+
+	present, err := smc.RevisionPresentIn(id, rev)
+	if err != nil {
+		t.Errorf("Should have found revision in source, but got err: %s", err)
+	} else if !present {
+		t.Errorf("Should have found revision in source, but did not")
+	}
+
+	// SyncSourceFor will ensure we have everything
+	err = smc.SyncSourceFor(id)
+	if err != nil {
+		t.Errorf("SyncSourceFor failed with unexpected error: %s", err)
+	}
+
+	// Ensure that the appropriate cache dirs and files exist
+	_, err = os.Stat(filepath.Join(cpath, "sources", "https---github.com-Masterminds-VCSTestRepo", ".git"))
+	if err != nil {
+		t.Error("Cache repo does not exist in expected location")
+	}
+
+	_, err = os.Stat(filepath.Join(cpath, "metadata", "github.com", "Masterminds", "VCSTestRepo", "cache.json"))
+	if err != nil {
+		// TODO(sdboyer) disabled until we get caching working
+		//t.Error("Metadata cache json file does not exist in expected location")
+	}
+
+	// Ensure source existence values are what we expect
+	var exists bool
+	exists, err = sm.SourceExists(id)
+	if err != nil {
+		t.Errorf("Error on checking SourceExists: %s", err)
+	}
+	if !exists {
+		t.Error("Source should exist after non-erroring call to ListVersions")
+	}
+}
+
+func TestDefaultBranchAssignment(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Skipping default branch assignment test in short mode")
+	}
+
+	sm, clean := mkNaiveSM(t)
+	defer clean()
+
+	id := mkPI("github.com/sdboyer/test-multibranch")
+	v, err := sm.ListVersions(id)
+	if err != nil {
+		t.Errorf("Unexpected error during initial project setup/fetching %s", err)
+	}
+
+	if len(v) != 3 {
+		t.Errorf("Expected three version results from the test repo, got %v", len(v))
+	} else {
+		brev := Revision("fda020843ac81352004b9dca3fcccdd517600149")
+		mrev := Revision("9f9c3a591773d9b28128309ac7a9a72abcab267d")
+		expected := []Version{
+			NewBranch("branchone").Is(brev),
+			NewBranch("otherbranch").Is(brev),
+			NewBranch("master").Is(mrev),
+		}
+
+		SortForUpgrade(v)
+
+		for k, e := range expected {
+			if !v[k].Matches(e) {
+				t.Errorf("Expected version %s in position %v but got %s", e, k, v[k])
+			}
+		}
+
+		if !v[0].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected branchone branch version to have isDefault flag, but it did not")
+		}
+		if !v[0].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected otherbranch branch version to have isDefault flag, but it did not")
+		}
+		if v[2].(versionPair).v.(branchVersion).isDefault {
+			t.Error("Expected master branch version not to have isDefault flag, but it did")
+		}
+	}
+}
+
// TestMgrMethodsFailWithBadPath checks that every SourceManager method that
// takes a ProjectIdentifier rejects an undeducible input path with an error,
// rather than panicking or silently succeeding.
func TestMgrMethodsFailWithBadPath(t *testing.T) {
	// a symbol will always bork it up
	bad := mkPI("foo/##&^").normalize()
	sm, clean := mkNaiveSM(t)
	defer clean()

	// Each call below should fail deduction on the malformed path; none of
	// them should ever reach the network.
	var err error
	if _, err = sm.SourceExists(bad); err == nil {
		t.Error("SourceExists() did not error on bad input")
	}
	if err = sm.SyncSourceFor(bad); err == nil {
		t.Error("SyncSourceFor() did not error on bad input")
	}
	if _, err = sm.ListVersions(bad); err == nil {
		t.Error("ListVersions() did not error on bad input")
	}
	if _, err = sm.RevisionPresentIn(bad, Revision("")); err == nil {
		t.Error("RevisionPresentIn() did not error on bad input")
	}
	if _, err = sm.ListPackages(bad, nil); err == nil {
		t.Error("ListPackages() did not error on bad input")
	}
	if _, _, err = sm.GetManifestAndLock(bad, nil); err == nil {
		t.Error("GetManifestAndLock() did not error on bad input")
	}
	if err = sm.ExportProject(bad, nil, ""); err == nil {
		t.Error("ExportProject() did not error on bad input")
	}
}
+
+func TestGetSources(t *testing.T) {
+	// This test is a tad slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping source setup test in short mode")
+	}
+
+	sm, clean := mkNaiveSM(t)
+
+	pil := []ProjectIdentifier{
+		mkPI("github.com/Masterminds/VCSTestRepo").normalize(),
+		mkPI("bitbucket.org/mattfarina/testhgrepo").normalize(),
+		mkPI("launchpad.net/govcstestbzrrepo").normalize(),
+	}
+
+	wg := &sync.WaitGroup{}
+	wg.Add(3)
+	for _, pi := range pil {
+		go func(lpi ProjectIdentifier) {
+			nn := lpi.netName()
+			src, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error setting up source: %s", nn, err)
+				return
+			}
+
+			// Re-get the same, make sure they are the same
+			src2, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error re-getting source: %s", nn, err)
+			} else if src != src2 {
+				t.Errorf("(src %q) first and second sources are not eq", nn)
+			}
+
+			// All of them _should_ select https, so this should work
+			lpi.NetworkName = "https://" + lpi.NetworkName
+			src3, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error getting explicit https source: %s", nn, err)
+			} else if src != src3 {
+				t.Errorf("(src %q) explicit https source should reuse autodetected https source", nn)
+			}
+
+			// Now put in http, and they should differ
+			lpi.NetworkName = "http://" + string(lpi.ProjectRoot)
+			src4, err := sm.getSourceFor(lpi)
+			if err != nil {
+				t.Errorf("(src %q) unexpected error getting explicit http source: %s", nn, err)
+			} else if src == src4 {
+				t.Errorf("(src %q) explicit http source should create a new src", nn)
+			}
+
+			wg.Done()
+		}(pi)
+	}
+
+	wg.Wait()
+
+	// nine entries (of which three are dupes): for each vcs, raw import path,
+	// the https url, and the http url
+	if len(sm.srcs) != 9 {
+		t.Errorf("Should have nine discrete entries in the srcs map, got %v", len(sm.srcs))
+	}
+	clean()
+}
+
+// Regression test for #32
+func TestGetInfoListVersionsOrdering(t *testing.T) {
+	// This test is quite slow, skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping slow test in short mode")
+	}
+
+	sm, clean := mkNaiveSM(t)
+	defer clean()
+
+	// setup done, now do the test
+
+	id := mkPI("github.com/Masterminds/VCSTestRepo").normalize()
+
+	_, _, err := sm.GetManifestAndLock(id, NewVersion("1.0.0"))
+	if err != nil {
+		t.Errorf("Unexpected error from GetInfoAt %s", err)
+	}
+
+	v, err := sm.ListVersions(id)
+	if err != nil {
+		t.Errorf("Unexpected error from ListVersions %s", err)
+	}
+
+	if len(v) != 3 {
+		t.Errorf("Expected three results from ListVersions, got %v", len(v))
+	}
+}
+
+func TestDeduceProjectRoot(t *testing.T) {
+	sm, clean := mkNaiveSM(t)
+	defer clean()
+
+	in := "github.com/sdboyer/gps"
+	pr, err := sm.DeduceProjectRoot(in)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in, err)
+	}
+	if string(pr) != in {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 1 {
+		t.Errorf("Root path trie should have one element after one deduction, has %v", sm.rootxt.Len())
+	}
+
+	pr, err = sm.DeduceProjectRoot(in)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in, err)
+	} else if string(pr) != in {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 1 {
+		t.Errorf("Root path trie should still have one element after performing the same deduction twice; has %v", sm.rootxt.Len())
+	}
+
+	// Now do a subpath
+	sub := path.Join(in, "foo")
+	pr, err = sm.DeduceProjectRoot(sub)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", sub, err)
+	} else if string(pr) != in {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 2 {
+		t.Errorf("Root path trie should have two elements, one for root and one for subpath; has %v", sm.rootxt.Len())
+	}
+
+	// Now do a fully different root, but still on github
+	in2 := "github.com/bagel/lox"
+	sub2 := path.Join(in2, "cheese")
+	pr, err = sm.DeduceProjectRoot(sub2)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", sub2, err)
+	} else if string(pr) != in2 {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 4 {
+		t.Errorf("Root path trie should have four elements, one for each unique root and subpath; has %v", sm.rootxt.Len())
+	}
+
+	// Ensure that our prefixes are bounded by path separators
+	in4 := "github.com/bagel/loxx"
+	pr, err = sm.DeduceProjectRoot(in4)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in4, err)
+	} else if string(pr) != in4 {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 5 {
+		t.Errorf("Root path trie should have five elements, one for each unique root and subpath; has %v", sm.rootxt.Len())
+	}
+
+	// Ensure that vcs extension-based matching comes through
+	in5 := "ffffrrrraaaaaapppppdoesnotresolve.com/baz.git"
+	pr, err = sm.DeduceProjectRoot(in5)
+	if err != nil {
+		t.Errorf("Problem while detecting root of %q %s", in5, err)
+	} else if string(pr) != in5 {
+		t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in)
+	}
+	if sm.rootxt.Len() != 6 {
+		t.Errorf("Root path trie should have six elements, one for each unique root and subpath; has %v", sm.rootxt.Len())
+	}
+}
+
// Test that the future returned from SourceMgr.deducePathAndProcess() is safe
// to call concurrently.
//
// Obviously, this is just a heuristic; passage does not guarantee correctness
// (though failure does guarantee incorrectness)
func TestMultiDeduceThreadsafe(t *testing.T) {
	sm, clean := mkNaiveSM(t)
	defer clean()

	in := "github.com/sdboyer/gps"
	rootf, srcf, err := sm.deducePathAndProcess(in)
	if err != nil {
		t.Errorf("Known-good path %q had unexpected basic deduction error: %s", in, err)
		t.FailNow()
	}

	cnum := 50
	wg := &sync.WaitGroup{}

	// Set up channel for everything else to block on.
	// Closing c releases all cnum goroutines at (approximately) the same
	// instant, maximizing the chance of exposing a race in the future.
	c := make(chan struct{}, 1)
	f := func(rnum int) {
		defer func() {
			wg.Done()
			// A panic inside the future is exactly the failure mode this
			// test exists to catch; convert it into a test error.
			if e := recover(); e != nil {
				t.Errorf("goroutine number %v panicked with err: %s", rnum, e)
			}
		}()
		<-c
		_, err := rootf()
		if err != nil {
			t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err)
		}
	}

	for k := range make([]struct{}, cnum) {
		wg.Add(1)
		go f(k)
		// Yield so goroutines actually reach the channel receive before close.
		runtime.Gosched()
	}
	close(c)
	wg.Wait()
	// All 50 concurrent calls must have collapsed into a single deduction.
	if sm.rootxt.Len() != 1 {
		t.Errorf("Root path trie should have just one element; has %v", sm.rootxt.Len())
	}

	// repeat for srcf
	wg2 := &sync.WaitGroup{}
	c = make(chan struct{}, 1)
	f = func(rnum int) {
		defer func() {
			wg2.Done()
			if e := recover(); e != nil {
				t.Errorf("goroutine number %v panicked with err: %s", rnum, e)
			}
		}()
		<-c
		_, _, err := srcf()
		if err != nil {
			t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err)
		}
	}

	for k := range make([]struct{}, cnum) {
		wg2.Add(1)
		go f(k)
		runtime.Gosched()
	}
	close(c)
	wg2.Wait()
	// NOTE(review): two entries presumably correspond to the import path and
	// its resolved URL form — confirm against getSourceFor's caching keys.
	if len(sm.srcs) != 2 {
		t.Errorf("Sources map should have just two elements, but has %v", len(sm.srcs))
	}
}
diff --git a/vendor/github.com/sdboyer/gps/manifest.go b/vendor/github.com/sdboyer/gps/manifest.go
new file mode 100644
index 0000000..ff23ec0
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/manifest.go
@@ -0,0 +1,140 @@
+package gps
+
// Manifest represents manifest-type data for a project at a particular version.
// That means dependency constraints, both for normal dependencies and for
// tests. The constraints expressed in a manifest determine the set of versions that
// are acceptable to try for a given project.
//
// Expressing a constraint in a manifest does not guarantee that a particular
// dependency will be present. It only guarantees that if packages in the
// project specified by the dependency are discovered through static analysis of
// the (transitive) import graph, then they will conform to the constraint.
//
// This does entail that manifests can express constraints on projects they do
// not themselves import. This is by design, but its implications are complex.
// See the gps docs for more information: https://github.com/sdboyer/gps/wiki
type Manifest interface {
	// DependencyConstraints returns a list of project-level constraints.
	DependencyConstraints() []ProjectConstraint

	// TestDependencyConstraints returns a list of constraints applicable to
	// test imports.
	//
	// These are applied only when tests are incorporated. Typically, that
	// will only be for root manifests.
	TestDependencyConstraints() []ProjectConstraint
}
+
// RootManifest extends Manifest to add special controls over solving that are
// only afforded to the root project.
type RootManifest interface {
	Manifest

	// Overrides returns a list of ProjectConstraints that will unconditionally
	// supersede any ProjectConstraint declarations made in either the root
	// manifest, or in any dependency's manifest.
	//
	// Overrides are a special control afforded only to root manifests. Tool
	// users should be encouraged to use them only as a last resort; they do not
	// "play well with others" (that is their express goal), and overreliance on
	// them can harm the ecosystem as a whole.
	Overrides() ProjectConstraints

	// IgnorePackages returns a set of import paths to ignore. These import
	// paths can be within the root project, or part of other projects. Ignoring
	// a package means that both it and its (unique) imports will be disregarded
	// by all relevant solver operations.
	IgnorePackages() map[string]bool
}
+
// SimpleManifest is a helper for tools to enumerate manifest data. It's
// generally intended for ephemeral manifests, such as those Analyzers create on
// the fly for projects with no manifest metadata, or metadata through a foreign
// tool's idioms.
type SimpleManifest struct {
	Deps     []ProjectConstraint // normal dependency constraints
	TestDeps []ProjectConstraint // constraints applied only for test imports
}

// Compile-time guarantee that SimpleManifest satisfies Manifest.
var _ Manifest = SimpleManifest{}

// DependencyConstraints returns the project's dependencies.
func (m SimpleManifest) DependencyConstraints() []ProjectConstraint {
	return m.Deps
}

// TestDependencyConstraints returns the project's test dependencies.
func (m SimpleManifest) TestDependencyConstraints() []ProjectConstraint {
	return m.TestDeps
}
+
// simpleRootManifest exists so that we have a safe value to swap into solver
// params when a nil Manifest is provided.
//
// Also, for tests.
type simpleRootManifest struct {
	c   []ProjectConstraint // dependency constraints
	tc  []ProjectConstraint // test dependency constraints
	ovr ProjectConstraints  // root-only overrides
	ig  map[string]bool     // set of import paths to ignore
}

// DependencyConstraints returns the root project's dependency constraints.
func (m simpleRootManifest) DependencyConstraints() []ProjectConstraint {
	return m.c
}

// TestDependencyConstraints returns the root project's test dependency constraints.
func (m simpleRootManifest) TestDependencyConstraints() []ProjectConstraint {
	return m.tc
}

// Overrides returns the root project's constraint overrides.
func (m simpleRootManifest) Overrides() ProjectConstraints {
	return m.ovr
}

// IgnorePackages returns the set of import paths the solver should ignore.
func (m simpleRootManifest) IgnorePackages() map[string]bool {
	return m.ig
}

// dup returns a deep copy of the manifest, so the solver can hold data
// that no outside caller can mutate.
func (m simpleRootManifest) dup() simpleRootManifest {
	m2 := simpleRootManifest{
		c:   make([]ProjectConstraint, len(m.c)),
		tc:  make([]ProjectConstraint, len(m.tc)),
		ovr: ProjectConstraints{},
		ig:  map[string]bool{},
	}

	copy(m2.c, m.c)
	copy(m2.tc, m.tc)

	// Maps must be copied element-wise; assignment would share storage.
	for k, v := range m.ovr {
		m2.ovr[k] = v
	}
	for k, v := range m.ig {
		m2.ig[k] = v
	}

	return m2
}
+
+// prepManifest ensures a manifest is prepared and safe for use by the solver.
+// This is mostly about ensuring that no outside routine can modify the manifest
+// while the solver is in-flight.
+//
+// This is achieved by copying the manifest's data into a new SimpleManifest.
+func prepManifest(m Manifest) Manifest {
+	if m == nil {
+		return SimpleManifest{}
+	}
+
+	deps := m.DependencyConstraints()
+	ddeps := m.TestDependencyConstraints()
+
+	rm := SimpleManifest{
+		Deps:     make([]ProjectConstraint, len(deps)),
+		TestDeps: make([]ProjectConstraint, len(ddeps)),
+	}
+
+	for k, d := range deps {
+		rm.Deps[k] = d
+	}
+	for k, d := range ddeps {
+		rm.TestDeps[k] = d
+	}
+
+	return rm
+}
diff --git a/vendor/github.com/sdboyer/gps/maybe_source.go b/vendor/github.com/sdboyer/gps/maybe_source.go
new file mode 100644
index 0000000..34fd5d5
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/maybe_source.go
@@ -0,0 +1,153 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"path/filepath"
+
+	"github.com/Masterminds/vcs"
+)
+
// maybeSource is a candidate recipe for creating a live source: try attempts
// to construct the source in cachedir, returning the source, its canonical
// identifier string, and any error.
type maybeSource interface {
	try(cachedir string, an ProjectAnalyzer) (source, string, error)
}

// maybeSources composes several candidates, to be attempted in order.
type maybeSources []maybeSource

// try attempts each candidate in order, returning the first success. If all
// fail, the accumulated failures are returned as a single sourceFailures
// error.
func (mbs maybeSources) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
	var e sourceFailures
	for _, mb := range mbs {
		src, ident, err := mb.try(cachedir, an)
		if err == nil {
			return src, ident, nil
		}
		// NOTE(review): on failure, ident may be the empty string (the
		// concrete try impls return "" alongside errors) — the failure
		// message then lacks a useful identifier; confirm if intentional.
		e = append(e, sourceSetupFailure{
			ident: ident,
			err:   err,
		})
	}
	return nil, "", e
}
+
+type sourceSetupFailure struct {
+	ident string
+	err   error
+}
+
+func (e sourceSetupFailure) Error() string {
+	return fmt.Sprintf("failed to set up %q, error %s", e.ident, e.err.Error())
+}
+
+type sourceFailures []sourceSetupFailure
+
+func (sf sourceFailures) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No valid source could be created:\n")
+	for _, e := range sf {
+		fmt.Fprintf(&buf, "\t%s", e.Error())
+	}
+
+	return buf.String()
+}
+
// maybeGitSource is a candidate for a git-backed source at a particular URL.
type maybeGitSource struct {
	url *url.URL
}

// try constructs a gitSource rooted in cachedir for the candidate URL.
// Unlike the bzr/hg candidates, there is no explicit Ping() probe here; the
// listVersions call below doubles as the remote reachability check.
func (m maybeGitSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
	ustr := m.url.String()
	// Sanitize the URL into a filesystem-safe directory name.
	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
	r, err := vcs.NewGitRepo(ustr, path)
	if err != nil {
		return nil, "", err
	}

	src := &gitSource{
		baseVCSSource: baseVCSSource{
			an: an,
			dc: newMetaCache(),
			crepo: &repo{
				r:     r,
				rpath: path,
			},
		},
	}

	// Wire up the concrete version lister before first use.
	src.baseVCSSource.lvfunc = src.listVersions

	_, err = src.listVersions()
	if err != nil {
		return nil, "", err
	}

	return src, ustr, nil
}
+
// maybeBzrSource is a candidate for a bzr-backed source at a particular URL.
type maybeBzrSource struct {
	url *url.URL
}

// try constructs a bzrSource rooted in cachedir for the candidate URL,
// first verifying the remote is reachable via Ping.
func (m maybeBzrSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
	ustr := m.url.String()
	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
	r, err := vcs.NewBzrRepo(ustr, path)
	if err != nil {
		return nil, "", err
	}
	if !r.Ping() {
		return nil, "", fmt.Errorf("Remote repository at %s does not exist, or is inaccessible", ustr)
	}

	src := &bzrSource{
		baseVCSSource: baseVCSSource{
			an: an,
			dc: newMetaCache(),
			// Ping succeeded, so upstream existence is already established.
			ex: existence{
				s: existsUpstream,
				f: existsUpstream,
			},
			crepo: &repo{
				r:     r,
				rpath: path,
			},
		},
	}
	src.baseVCSSource.lvfunc = src.listVersions

	return src, ustr, nil
}
+
// maybeHgSource is a candidate for an hg-backed source at a particular URL.
// Its try mirrors maybeBzrSource.try, differing only in the repo constructor
// and concrete source type.
type maybeHgSource struct {
	url *url.URL
}

// try constructs an hgSource rooted in cachedir for the candidate URL,
// first verifying the remote is reachable via Ping.
func (m maybeHgSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) {
	ustr := m.url.String()
	path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr))
	r, err := vcs.NewHgRepo(ustr, path)
	if err != nil {
		return nil, "", err
	}
	if !r.Ping() {
		return nil, "", fmt.Errorf("Remote repository at %s does not exist, or is inaccessible", ustr)
	}

	src := &hgSource{
		baseVCSSource: baseVCSSource{
			an: an,
			dc: newMetaCache(),
			// Ping succeeded, so upstream existence is already established.
			ex: existence{
				s: existsUpstream,
				f: existsUpstream,
			},
			crepo: &repo{
				r:     r,
				rpath: path,
			},
		},
	}
	src.baseVCSSource.lvfunc = src.listVersions

	return src, ustr, nil
}
diff --git a/vendor/github.com/sdboyer/gps/remove_go16.go b/vendor/github.com/sdboyer/gps/remove_go16.go
new file mode 100644
index 0000000..a25ea2f
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/remove_go16.go
@@ -0,0 +1,44 @@
+// +build !go1.7
+
+package gps
+
+import (
+	"os"
+	"path/filepath"
+	"runtime"
+)
+
+// removeAll removes path and any children it contains. It deals correctly with
+// removal on Windows where, prior to Go 1.7, there were issues when files were
+// set to read-only.
+func removeAll(path string) error {
+	// Only need special handling for windows
+	if runtime.GOOS != "windows" {
+		return os.RemoveAll(path)
+	}
+
+	// Simple case: if Remove works, we're done.
+	err := os.Remove(path)
+	if err == nil || os.IsNotExist(err) {
+		return nil
+	}
+
+	// make sure all files are writable so we can delete them
+	err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
+		if err != nil && err != filepath.SkipDir {
+			// walk gave us some error, give it back.
+			return err
+		}
+		mode := info.Mode()
+		if mode|0200 == mode {
+			return nil
+		}
+
+		return os.Chmod(path, mode|0200)
+	})
+	if err != nil {
+		return err
+	}
+
+	return os.Remove(path)
+}
diff --git a/vendor/github.com/sdboyer/gps/remove_go17.go b/vendor/github.com/sdboyer/gps/remove_go17.go
new file mode 100644
index 0000000..59c19a6
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/remove_go17.go
@@ -0,0 +1,11 @@
+// +build go1.7
+
+package gps
+
+import "os"
+
// removeAll removes path and any children it contains.
//
// go1.7 and later deal with the file perms issue in os.RemoveAll(), so our
// workaround is no longer necessary; this build simply delegates.
func removeAll(path string) error {
	return os.RemoveAll(path)
}
diff --git a/vendor/github.com/sdboyer/gps/result.go b/vendor/github.com/sdboyer/gps/result.go
new file mode 100644
index 0000000..d62d06b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/result.go
@@ -0,0 +1,77 @@
+package gps
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+)
+
// A Solution is returned by a solver run. It is mostly just a Lock, with some
// additional methods that report information about the solve run.
type Solution interface {
	Lock
	// Attempts reports how many solutions were attempted during the run.
	Attempts() int
}

// solution is the concrete Solution produced by the solver.
type solution struct {
	// A list of the projects selected by the solver.
	p []LockedProject

	// The number of solutions that were attempted
	att int

	// The hash digest of the input opts
	hd []byte
}
+
+// WriteDepTree takes a basedir and a Lock, and exports all the projects
+// listed in the lock to the appropriate target location within the basedir.
+//
+// It requires a SourceManager to do the work, and takes a flag indicating
+// whether or not to strip vendor directories contained in the exported
+// dependencies.
+func WriteDepTree(basedir string, l Lock, sm SourceManager, sv bool) error {
+	if l == nil {
+		return fmt.Errorf("must provide non-nil Lock to WriteDepTree")
+	}
+
+	err := os.MkdirAll(basedir, 0777)
+	if err != nil {
+		return err
+	}
+
+	// TODO(sdboyer) parallelize
+	for _, p := range l.Projects() {
+		to := path.Join(basedir, string(p.Ident().ProjectRoot))
+
+		err := os.MkdirAll(to, 0777)
+		if err != nil {
+			return err
+		}
+
+		err = sm.ExportProject(p.Ident(), p.Version(), to)
+		if err != nil {
+			removeAll(basedir)
+			return fmt.Errorf("error while exporting %s: %s", p.Ident().ProjectRoot, err)
+		}
+		if sv {
+			filepath.Walk(to, stripVendor)
+		}
+		// TODO(sdboyer) dump version metadata file
+	}
+
+	return nil
+}
+
// Projects returns the list of projects selected by the solver.
func (r solution) Projects() []LockedProject {
	return r.p
}

// Attempts returns the number of solutions that were attempted before this
// solution was found.
func (r solution) Attempts() int {
	return r.att
}

// InputHash returns the hash digest of the input options the solve was run with.
func (r solution) InputHash() []byte {
	return r.hd
}
diff --git a/vendor/github.com/sdboyer/gps/result_test.go b/vendor/github.com/sdboyer/gps/result_test.go
new file mode 100644
index 0000000..d0fd972
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/result_test.go
@@ -0,0 +1,111 @@
+package gps
+
+import (
+	"os"
+	"path"
+	"testing"
+)
+
// Shared fixtures, populated by init below.
var basicResult solution
var kub atom

// pi is shorthand for building a ProjectIdentifier from just a root path.
func pi(n string) ProjectIdentifier {
	return ProjectIdentifier{
		ProjectRoot: ProjectRoot(n),
	}
}
+
// init seeds the shared test fixtures: a small two-project solution, and a
// deliberately heavyweight atom for stress cases.
func init() {
	basicResult = solution{
		att: 1,
		p: []LockedProject{
			pa2lp(atom{
				id: pi("github.com/sdboyer/testrepo"),
				v:  NewBranch("master").Is(Revision("4d59fb584b15a94d7401e356d2875c472d76ef45")),
			}, nil),
			pa2lp(atom{
				id: pi("github.com/Masterminds/VCSTestRepo"),
				v:  NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
			}, nil),
		},
	}

	// just in case something needs punishing, kubernetes is happy to oblige
	kub = atom{
		id: pi("github.com/kubernetes/kubernetes"),
		v:  NewVersion("1.0.0").Is(Revision("528f879e7d3790ea4287687ef0ab3f2a01cc2718")),
	}
}
+
// TestWriteDepTree checks WriteDepTree's nil-lock guard and that a basic
// two-project lock exports without error.
func TestWriteDepTree(t *testing.T) {
	// This test is a bit slow, skip it on -short
	if testing.Short() {
		t.Skip("Skipping dep tree writing test in short mode")
	}

	r := basicResult

	tmp := path.Join(os.TempDir(), "vsolvtest")
	os.RemoveAll(tmp)

	sm, clean := mkNaiveSM(t)
	defer clean()

	// nil lock/result should err immediately
	err := WriteDepTree(path.Join(tmp, "export"), nil, sm, true)
	if err == nil {
		t.Errorf("Should error if nil lock is passed to WriteDepTree")
	}

	err = WriteDepTree(path.Join(tmp, "export"), r, sm, true)
	if err != nil {
		t.Errorf("Unexpected error while creating vendor tree: %s", err)
	}

	// TODO(sdboyer) add more checks
}
+
+func BenchmarkCreateVendorTree(b *testing.B) {
+	// We're fs-bound here, so restrict to single parallelism
+	b.SetParallelism(1)
+
+	r := basicResult
+	tmp := path.Join(os.TempDir(), "vsolvtest")
+
+	clean := true
+	sm, err := NewSourceManager(naiveAnalyzer{}, path.Join(tmp, "cache"))
+	if err != nil {
+		b.Errorf("NewSourceManager errored unexpectedly: %q", err)
+		clean = false
+	}
+
+	// Prefetch the projects before timer starts
+	for _, lp := range r.p {
+		err := sm.SyncSourceFor(lp.Ident())
+		if err != nil {
+			b.Errorf("failed getting project info during prefetch: %s", err)
+			clean = false
+		}
+	}
+
+	if clean {
+		b.ResetTimer()
+		b.StopTimer()
+		exp := path.Join(tmp, "export")
+		for i := 0; i < b.N; i++ {
+			// Order the loop this way to make it easy to disable final cleanup, to
+			// ease manual inspection
+			os.RemoveAll(exp)
+			b.StartTimer()
+			err = WriteDepTree(exp, r, sm, true)
+			b.StopTimer()
+			if err != nil {
+				b.Errorf("unexpected error after %v iterations: %s", i, err)
+				break
+			}
+		}
+	}
+
+	sm.Release()
+	os.RemoveAll(tmp) // comment this to leave temp dir behind for inspection
+}
diff --git a/vendor/github.com/sdboyer/gps/satisfy.go b/vendor/github.com/sdboyer/gps/satisfy.go
new file mode 100644
index 0000000..78cffa0
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/satisfy.go
@@ -0,0 +1,279 @@
+package gps
+
// check performs constraint checks on the provided atom. The set of checks
// differ slightly depending on whether the atom is pkgonly, or if it's the
// entire project being added for the first time.
//
// The goal is to determine whether selecting the atom would result in a state
// where all the solver requirements are still satisfied.
func (s *solver) check(a atomWithPackages, pkgonly bool) error {
	pa := a.a
	if nilpa == pa {
		// This shouldn't be able to happen, but if it does, it unequivocally
		// indicates a logical bug somewhere, so blowing up is preferable
		panic("canary - checking version of empty ProjectAtom")
	}

	// If we're pkgonly, then base atom was already determined to be allowable,
	// so we can skip the checkAtomAllowable step.
	if !pkgonly {
		if err := s.checkAtomAllowable(pa); err != nil {
			s.traceInfo(err)
			return err
		}
	}

	if err := s.checkRequiredPackagesExist(a); err != nil {
		s.traceInfo(err)
		return err
	}

	deps, err := s.getImportsAndConstraintsOf(a)
	if err != nil {
		// An err here would be from the package fetcher; pass it straight back
		// TODO(sdboyer) can we traceInfo this?
		return err
	}

	// TODO(sdboyer) this deps list contains only packages not already selected
	// from the target atom (assuming one is selected at all). It's fine for
	// now, but won't be good enough when we get around to doing static
	// analysis.
	for _, dep := range deps {
		// Each helper below reports (and records) its own failure reason;
		// the first failure short-circuits the whole check.
		if err := s.checkIdentMatches(a, dep); err != nil {
			s.traceInfo(err)
			return err
		}
		if err := s.checkDepsConstraintsAllowable(a, dep); err != nil {
			s.traceInfo(err)
			return err
		}
		if err := s.checkDepsDisallowsSelected(a, dep); err != nil {
			s.traceInfo(err)
			return err
		}
		// TODO(sdboyer) decide how to refactor in order to re-enable this. Checking for
		// revision existence is important...but kinda obnoxious.
		//if err := s.checkRevisionExists(a, dep); err != nil {
		//s.traceInfo(err)
		//return err
		//}
		if err := s.checkPackageImportsFromDepExist(a, dep); err != nil {
			s.traceInfo(err)
			return err
		}

		// TODO(sdboyer) add check that fails if adding this atom would create a loop
	}

	return nil
}
+
// checkAtomAllowable ensures that an atom itself is acceptable with respect to
// the constraints established by the current solution.
//
// On failure, every depender whose individual constraint the atom violates is
// marked failed, and a versionNotAllowedFailure naming them is returned.
func (s *solver) checkAtomAllowable(pa atom) error {
	constraint := s.sel.getConstraint(pa.id)
	if s.b.matches(pa.id, constraint, pa.v) {
		return nil
	}
	// TODO(sdboyer) collect constraint failure reason (wait...aren't we, below?)

	deps := s.sel.getDependenciesOn(pa.id)
	var failparent []dependency
	for _, dep := range deps {
		if !s.b.matches(pa.id, dep.dep.Constraint, pa.v) {
			s.fail(dep.depender.id)
			failparent = append(failparent, dep)
		}
	}

	err := &versionNotAllowedFailure{
		goal:       pa,
		failparent: failparent,
		c:          constraint,
	}

	return err
}
+
// checkRequiredPackagesExist ensures that all required packages enumerated by
// existing dependencies on this atom are actually present in the atom.
func (s *solver) checkRequiredPackagesExist(a atomWithPackages) error {
	ptree, err := s.b.ListPackages(a.a.id, a.a.v)
	if err != nil {
		// TODO(sdboyer) handle this more gracefully
		return err
	}

	deps := s.sel.getDependenciesOn(a.a.id)
	// fp maps each problematic package to the set of dependers requiring it.
	fp := make(map[string]errDeppers)
	// We inspect these in a bit of a roundabout way, in order to incrementally
	// build up the failure we'd return if there is, indeed, a missing package.
	// TODO(sdboyer) rechecking all of these every time is wasteful. Is there a shortcut?
	for _, dep := range deps {
		for _, pkg := range dep.dep.pl {
			if errdep, seen := fp[pkg]; seen {
				// Package already known to be a problem; just add this depender.
				errdep.deppers = append(errdep.deppers, dep.depender)
				fp[pkg] = errdep
			} else {
				perr, has := ptree.Packages[pkg]
				if !has || perr.Err != nil {
					fp[pkg] = errDeppers{
						err:     perr.Err,
						deppers: []atom{dep.depender},
					}
				}
			}
		}
	}

	if len(fp) > 0 {
		return &checkeeHasProblemPackagesFailure{
			goal:    a.a,
			failpkg: fp,
		}
	}
	return nil
}
+
// checkDepsConstraintsAllowable checks that the constraints of an atom on a
// given dep are valid with respect to existing constraints.
//
// On failure, siblings whose constraints are disjoint with the new dep are
// marked failed and reported separately from those that still agree.
func (s *solver) checkDepsConstraintsAllowable(a atomWithPackages, cdep completeDep) error {
	dep := cdep.workingConstraint
	constraint := s.sel.getConstraint(dep.Ident)
	// Ensure the constraint expressed by the dep has at least some possible
	// intersection with the intersection of existing constraints.
	if s.b.matchesAny(dep.Ident, constraint, dep.Constraint) {
		return nil
	}

	siblings := s.sel.getDependenciesOn(dep.Ident)
	// No admissible versions - visit all siblings and identify the disagreement(s)
	var failsib []dependency
	var nofailsib []dependency
	for _, sibling := range siblings {
		if !s.b.matchesAny(dep.Ident, sibling.dep.Constraint, dep.Constraint) {
			s.fail(sibling.depender.id)
			failsib = append(failsib, sibling)
		} else {
			nofailsib = append(nofailsib, sibling)
		}
	}

	return &disjointConstraintFailure{
		goal:      dependency{depender: a.a, dep: cdep},
		failsib:   failsib,
		nofailsib: nofailsib,
		c:         constraint,
	}
}
+
// checkDepsDisallowsSelected ensures that an atom's constraints on a particular
// dep are not incompatible with the version of that dep that's already been
// selected.
func (s *solver) checkDepsDisallowsSelected(a atomWithPackages, cdep completeDep) error {
	dep := cdep.workingConstraint
	selected, exists := s.sel.selected(dep.Ident)
	if exists && !s.b.matches(dep.Ident, dep.Constraint, selected.a.v) {
		// The currently-selected version conflicts; record the failure
		// against the dep's identifier before reporting.
		s.fail(dep.Ident)

		return &constraintNotAllowedFailure{
			goal: dependency{depender: a.a, dep: cdep},
			v:    selected.a.v,
		}
	}
	return nil
}
+
// checkIdentMatches ensures that the LocalName of a dep introduced by an atom,
// has the same NetworkName as what's already been selected (assuming anything's
// been selected).
//
// In other words, this ensures that the solver never simultaneously selects two
// identifiers with the same local name, but that disagree about where their
// network source is.
func (s *solver) checkIdentMatches(a atomWithPackages, cdep completeDep) error {
	dep := cdep.workingConstraint
	if curid, has := s.sel.getIdentFor(dep.Ident.ProjectRoot); has && !curid.equiv(dep.Ident) {
		deps := s.sel.getDependenciesOn(a.a.id)
		// Fail all the other deps, as there's no way atom can ever be
		// compatible with them
		for _, d := range deps {
			s.fail(d.depender.id)
		}

		return &sourceMismatchFailure{
			shared:   dep.Ident.ProjectRoot,
			sel:      deps,
			current:  curid.netName(),
			mismatch: dep.Ident.netName(),
			prob:     a.a,
		}
	}

	return nil
}
+
// checkPackageImportsFromDepExist ensures that, if the dep is already selected,
// the newly-required set of packages being placed on it exist and are valid.
func (s *solver) checkPackageImportsFromDepExist(a atomWithPackages, cdep completeDep) error {
	sel, is := s.sel.selected(cdep.workingConstraint.Ident)
	if !is {
		// dep is not already selected; nothing to do
		return nil
	}

	ptree, err := s.b.ListPackages(sel.a.id, sel.a.v)
	if err != nil {
		// TODO(sdboyer) handle this more gracefully
		return err
	}

	// Build the failure up-front; it is only returned if prob is non-empty.
	e := &depHasProblemPackagesFailure{
		goal: dependency{
			depender: a.a,
			dep:      cdep,
		},
		v:    sel.a.v,
		prob: make(map[string]error),
	}

	for _, pkg := range cdep.pl {
		perr, has := ptree.Packages[pkg]
		if !has || perr.Err != nil {
			// A present-but-broken package records its error; a missing
			// package records nil.
			if has {
				e.prob[pkg] = perr.Err
			} else {
				e.prob[pkg] = nil
			}
		}
	}

	if len(e.prob) > 0 {
		return e
	}
	return nil
}
+
// checkRevisionExists ensures that if a dependency is constrained by a
// revision, that that revision actually exists.
func (s *solver) checkRevisionExists(a atomWithPackages, cdep completeDep) error {
	r, isrev := cdep.Constraint.(Revision)
	if !isrev {
		// Constraint is not a revision; nothing to do
		return nil
	}

	// NOTE(review): the lookup error is deliberately discarded here — absence
	// and lookup failure are treated the same; confirm that's intended.
	present, _ := s.b.RevisionPresentIn(cdep.Ident, r)
	if present {
		return nil
	}

	return &nonexistentRevisionFailure{
		goal: dependency{
			depender: a.a,
			dep:      cdep,
		},
		r: r,
	}
}
diff --git a/vendor/github.com/sdboyer/gps/selection.go b/vendor/github.com/sdboyer/gps/selection.go
new file mode 100644
index 0000000..7f03c51
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/selection.go
@@ -0,0 +1,214 @@
+package gps
+
+// selection tracks the set of atoms the solver has currently picked, along
+// with the dependencies that motivated each pick.
+type selection struct {
+	// projects is a stack of selected atoms, in selection order.
+	projects []selected
+	// deps maps a project root to the dependencies placed on it.
+	deps     map[ProjectRoot][]dependency
+	// sm performs constraint intersection (see getConstraint).
+	sm       sourceBridge
+}
+
+// selected pairs an atomWithPackages with an indicator of whether it was the
+// first selection made for its project, as opposed to a package-only
+// addition to an already-selected project.
+type selected struct {
+	a     atomWithPackages
+	first bool
+}
+
+// getDependenciesOn returns all dependencies currently placed on the project
+// root of the given identifier, or nil when none exist.
+func (s *selection) getDependenciesOn(id ProjectIdentifier) []dependency {
+	// Indexing a map with an absent key yields the zero value - here, a nil
+	// slice - which is exactly the desired "no dependencies" result, so the
+	// explicit existence check is unnecessary.
+	return s.deps[id.ProjectRoot]
+}
+
+// getIdentFor returns the ProjectIdentifier (so, the network name) currently in
+// use for the provided ProjectRoot.
+//
+// If no dependencies are present yet that designate a network name for
+// the provided root, this will return an empty ProjectIdentifier and false.
+func (s *selection) getIdentFor(pr ProjectRoot) (ProjectIdentifier, bool) {
+	// For now, at least, the solver maintains (assumes?) the invariant that
+	// whatever is first in the deps list decides the net name to be used.
+	if deps := s.getDependenciesOn(ProjectIdentifier{ProjectRoot: pr}); len(deps) > 0 {
+		return deps[0].dep.Ident, true
+	}
+	return ProjectIdentifier{}, false
+}
+
+// pushSelection pushes a new atomWithPackages onto the selection stack, along
+// with an indicator as to whether this selection indicates a new project *and*
+// packages, or merely some new packages on a project that was already selected.
+func (s *selection) pushSelection(a atomWithPackages, pkgonly bool) {
+	sel := selected{
+		a:     a,
+		first: !pkgonly,
+	}
+	s.projects = append(s.projects, sel)
+}
+
+// popSelection removes and returns the last atomWithPackages from the selection
+// stack, along with an indication of whether that element was the first from
+// that project - that is, if it represented an addition of both a project and
+// one or more packages to the overall selection.
+func (s *selection) popSelection() (atomWithPackages, bool) {
+	var sel selected
+	sel, s.projects = s.projects[len(s.projects)-1], s.projects[:len(s.projects)-1]
+	return sel.a, sel.first
+}
+
+// pushDep records a new dependency against the project root it targets.
+func (s *selection) pushDep(dep dependency) {
+	s.deps[dep.dep.Ident.ProjectRoot] = append(s.deps[dep.dep.Ident.ProjectRoot], dep)
+}
+
+// popDep removes and returns the most recently pushed dependency on the given
+// identifier's project root. The caller must ensure at least one exists.
+func (s *selection) popDep(id ProjectIdentifier) (dep dependency) {
+	deps := s.deps[id.ProjectRoot]
+	dep, s.deps[id.ProjectRoot] = deps[len(deps)-1], deps[:len(deps)-1]
+	return dep
+}
+
+// depperCount reports how many dependencies are currently placed on the given
+// identifier's project root.
+func (s *selection) depperCount(id ProjectIdentifier) int {
+	return len(s.deps[id.ProjectRoot])
+}
+
+// setDependenciesOn wholesale replaces the dependency list for the given
+// identifier's project root.
+func (s *selection) setDependenciesOn(id ProjectIdentifier, deps []dependency) {
+	s.deps[id.ProjectRoot] = deps
+}
+
+// getRequiredPackagesIn computes the set of unique packages within the given
+// ProjectIdentifier that have dependers, along with the number of dependers
+// each package has.
+func (s *selection) getRequiredPackagesIn(id ProjectIdentifier) map[string]int {
+	// TODO(sdboyer) this is horribly inefficient to do on the fly; we need a method to
+	// precompute it on pushing a new dep, and preferably with an immut
+	// structure so that we can pop with zero cost.
+	uniq := make(map[string]int)
+	for _, dep := range s.deps[id.ProjectRoot] {
+		for _, pkg := range dep.dep.pl {
+			// Incrementing a missing key starts from the zero value, so
+			// the prior existence check was redundant.
+			uniq[pkg]++
+		}
+	}
+
+	return uniq
+}
+
+// getSelectedPackagesIn computes the set of unique packages within the given
+// ProjectIdentifier that are currently selected, along with the number of
+// times each package has been independently selected.
+func (s *selection) getSelectedPackagesIn(id ProjectIdentifier) map[string]int {
+	// TODO(sdboyer) this is horribly inefficient to do on the fly; we need a method to
+	// precompute it on pushing a new dep, and preferably with an immut
+	// structure so that we can pop with zero cost.
+	uniq := make(map[string]int)
+	for _, p := range s.projects {
+		if !p.a.a.id.eq(id) {
+			continue
+		}
+		for _, pkg := range p.a.pl {
+			// Incrementing a missing key starts from the zero value, so
+			// the prior existence check was redundant.
+			uniq[pkg]++
+		}
+	}
+
+	return uniq
+}
+
+// getConstraint returns the composite constraint currently in effect for the
+// given identifier's project root: the intersection of the constraints from
+// every dependency placed on it, or the open set (any) when there are none.
+func (s *selection) getConstraint(id ProjectIdentifier) Constraint {
+	// TODO(sdboyer) recomputing this sucks and is quite wasteful. Precompute/cache it
+	// on changes to the constraint set, instead.
+	deps := s.deps[id.ProjectRoot]
+	if len(deps) == 0 {
+		return any
+	}
+
+	// The solver itself is expected to maintain the invariant that all the
+	// constraints kept here collectively admit a non-empty set of versions. We
+	// assume this is the case here while assembling a composite constraint.
+
+	// Start with the open set and narrow it with each dependency's constraint.
+	var ret Constraint = any
+	for _, dep := range deps {
+		ret = s.sm.intersect(id, ret, dep.dep.Constraint)
+	}
+
+	return ret
+}
+
+// selected checks to see if the given ProjectIdentifier has been selected, and
+// if so, returns the corresponding atomWithPackages.
+//
+// It walks the projects selection list from front to back and returns the first
+// match it finds, which means it will always and only return the base selection
+// of the project, without any additional package selections that may or may not
+// have happened later.
+func (s *selection) selected(id ProjectIdentifier) (atomWithPackages, bool) {
+	for i := range s.projects {
+		if s.projects[i].a.a.id.ProjectRoot == id.ProjectRoot {
+			return s.projects[i].a, true
+		}
+	}
+
+	return atomWithPackages{a: nilpa}, false
+}
+
+// unselected is a priority queue of bimodalIdentifiers awaiting selection,
+// ordered by the injected comparison function cmp.
+type unselected struct {
+	sl  []bimodalIdentifier
+	cmp func(i, j int) bool
+}
+
+// Len reports the number of queued identifiers (sort.Interface).
+func (u unselected) Len() int {
+	return len(u.sl)
+}
+
+// Less defers ordering to the injected comparison function (sort.Interface).
+func (u unselected) Less(i, j int) bool {
+	return u.cmp(i, j)
+}
+
+// Swap exchanges two queued identifiers (sort.Interface).
+func (u unselected) Swap(i, j int) {
+	u.sl[i], u.sl[j] = u.sl[j], u.sl[i]
+}
+
+// Push appends an element (heap.Interface); x must be a bimodalIdentifier.
+func (u *unselected) Push(x interface{}) {
+	u.sl = append(u.sl, x.(bimodalIdentifier))
+}
+
+// Pop removes and returns the last element (heap.Interface).
+func (u *unselected) Pop() (v interface{}) {
+	v, u.sl = u.sl[len(u.sl)-1], u.sl[:len(u.sl)-1]
+	return v
+}
+
+// remove takes a bimodalIdentifier out of the priority queue, if present. Only
+// an entry matching both the identifier and the full package list is removed.
+//
+// There are, generally, two ways this gets called: to remove the unselected
+// item from the front of the queue while that item is being unselected, and
+// during backtracking, when an item becomes unnecessary because the item that
+// induced it was popped off.
+//
+// The worst case for both of these is O(n), but in practice the first case is
+// O(1), as we iterate the queue from front to back.
+func (u *unselected) remove(bmi bimodalIdentifier) {
+	for k, pi := range u.sl {
+		if !pi.id.eq(bmi.id) {
+			continue
+		}
+
+		// The ids match; remove this entry only if the package lists also
+		// match. Simple elementwise comparison is sufficient, as both lists
+		// are assumed to be sorted the same. (Previously the comparison
+		// result was discarded - mismatched entries were removed anyway -
+		// and bmi.pl could be indexed out of range when shorter than pi.pl.)
+		if len(pi.pl) != len(bmi.pl) {
+			continue
+		}
+		match := true
+		for i, pkg := range pi.pl {
+			if bmi.pl[i] != pkg {
+				match = false
+				break
+			}
+		}
+		if !match {
+			continue
+		}
+
+		if k == len(u.sl)-1 {
+			// if we're on the last element, just pop, no splice
+			u.sl = u.sl[:len(u.sl)-1]
+		} else {
+			u.sl = append(u.sl[:k], u.sl[k+1:]...)
+		}
+		return
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/solve_basic_test.go b/vendor/github.com/sdboyer/gps/solve_basic_test.go
new file mode 100644
index 0000000..9fe9780
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_basic_test.go
@@ -0,0 +1,1655 @@
+package gps
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/Masterminds/semver"
+)
+
+// regfrom matches "name from source version" fixture strings, used to express
+// an alternate network source for a project.
+var regfrom = regexp.MustCompile(`^(\w*) from (\w*) ([0-9\.\*]*)`)
+
+// nvSplit splits an "info" string on " " into the pair of name and
+// version/constraint, and returns each individually.
+//
+// If the info string contains a " from " clause, the middle segment is used
+// as the identifier's NetworkName.
+//
+// This is for narrow use - panics if there are less than two resulting items in
+// the slice.
+func nvSplit(info string) (id ProjectIdentifier, version string) {
+	if strings.Contains(info, " from ") {
+		// NOTE(review): assumes regfrom matches whenever " from " appears;
+		// malformed input would panic on the nil submatch slice - confirm
+		// that's acceptable for test data.
+		parts := regfrom.FindStringSubmatch(info)
+		info = parts[1] + " " + parts[3]
+		id.NetworkName = parts[2]
+	}
+
+	s := strings.SplitN(info, " ", 2)
+	if len(s) < 2 {
+		panic(fmt.Sprintf("Malformed name/version info string '%s'", info))
+	}
+
+	id.ProjectRoot, version = ProjectRoot(s[0]), s[1]
+	return
+}
+
+// nvrSplit splits an "info" string on " " into the triplet of name,
+// version/constraint, and revision, and returns each individually.
+//
+// It will work fine if only name and version/constraint are provided.
+//
+// If the info string contains a " from " clause, the middle segment is used
+// as the identifier's NetworkName.
+//
+// This is for narrow use - panics if there are less than two resulting items in
+// the slice.
+func nvrSplit(info string) (id ProjectIdentifier, version string, revision Revision) {
+	if strings.Contains(info, " from ") {
+		// NOTE(review): assumes regfrom matches whenever " from " appears;
+		// malformed input would panic on the nil submatch slice.
+		parts := regfrom.FindStringSubmatch(info)
+		info = fmt.Sprintf("%s %s", parts[1], parts[3])
+		id.NetworkName = parts[2]
+	}
+
+	s := strings.SplitN(info, " ", 3)
+	if len(s) < 2 {
+		panic(fmt.Sprintf("Malformed name/version info string '%s'", info))
+	}
+
+	id.ProjectRoot, version = ProjectRoot(s[0]), s[1]
+
+	// The optional third element is the revision.
+	if len(s) == 3 {
+		revision = Revision(s[2])
+	}
+	return
+}
+
+// mkAtom splits the input string on a space, and uses the first two elements as
+// the project identifier and version, respectively.
+//
+// The version segment may have a leading character indicating the type of
+// version to create:
+//
+//  p: create a "plain" (non-semver) version.
+//  b: create a branch version.
+//  r: create a revision.
+//
+// No prefix is assumed to indicate a semver version.
+//
+// If a third space-delimited element is provided, it will be interpreted as a
+// revision, and used as the underlying version in a PairedVersion. No prefix
+// should be provided in this case. It is an error (and will panic) to try to
+// pass a revision with an underlying revision.
+func mkAtom(info string) atom {
+	// if info is "root", special case it to use the root "version"
+	if info == "root" {
+		return atom{
+			id: ProjectIdentifier{
+				ProjectRoot: ProjectRoot("root"),
+			},
+			v: rootRev,
+		}
+	}
+
+	id, ver, rev := nvrSplit(info)
+
+	// The leading character of the version segment selects the version type.
+	var v Version
+	switch ver[0] {
+	case 'r':
+		if rev != "" {
+			panic("Cannot pair a revision with a revision")
+		}
+		v = Revision(ver[1:])
+	case 'p':
+		v = NewVersion(ver[1:])
+	case 'b':
+		v = NewBranch(ver[1:])
+	default:
+		// No prefix: the segment must be a valid semver string.
+		_, err := semver.NewVersion(ver)
+		if err != nil {
+			// don't want to allow bad test data at this level, so just panic
+			panic(fmt.Sprintf("Error when converting '%s' into semver: %s", ver, err))
+		}
+		v = NewVersion(ver)
+	}
+
+	// Pair the version with its underlying revision, if one was provided.
+	if rev != "" {
+		v = v.(UnpairedVersion).Is(rev)
+	}
+
+	return atom{
+		id: id,
+		v:  v,
+	}
+}
+
+// mkPCstrnt splits the input string on a space, and uses the first two elements
+// as the project identifier and constraint body, respectively.
+//
+// The constraint body may have a leading character indicating the type of
+// version to create:
+//
+//  p: create a "plain" (non-semver) version.
+//  b: create a branch version.
+//  r: create a revision.
+//
+// If no leading character is used, a semver constraint is assumed.
+func mkPCstrnt(info string) ProjectConstraint {
+	id, ver, rev := nvrSplit(info)
+
+	// The leading character of the constraint body selects the constraint type.
+	var c Constraint
+	switch ver[0] {
+	case 'r':
+		c = Revision(ver[1:])
+	case 'p':
+		c = NewVersion(ver[1:])
+	case 'b':
+		c = NewBranch(ver[1:])
+	default:
+		// Without one of those leading characters, we know it's a proper semver
+		// expression, so use the other parser that doesn't look for a rev
+		rev = ""
+		id, ver = nvSplit(info)
+		var err error
+		c, err = NewSemverConstraint(ver)
+		if err != nil {
+			// don't want bad test data at this level, so just panic
+			panic(fmt.Sprintf("Error when converting '%s' into semver constraint: %s (full info: %s)", ver, err, info))
+		}
+	}
+
+	// There's no practical reason that a real tool would need to produce a
+	// constraint that's a PairedVersion, but it is a possibility admitted by the
+	// system, so we at least allow for it in our testing harness.
+	if rev != "" {
+		// Of course, this *will* panic if the predicate is a revision or a
+		// semver constraint, neither of which implement UnpairedVersion. This
+		// is as intended, to prevent bad data from entering the system.
+		c = c.(UnpairedVersion).Is(rev)
+	}
+
+	return ProjectConstraint{
+		Ident:      id,
+		Constraint: c,
+	}
+}
+
+// mkCDep composes a completeDep struct from the inputs.
+//
+// The only real work here is passing the initial string to mkPCstrnt. All the
+// other args are taken as package names.
+func mkCDep(pdep string, pl ...string) completeDep {
+	pc := mkPCstrnt(pdep)
+	wc := workingConstraint{
+		Ident:      pc.Ident,
+		Constraint: pc.Constraint,
+	}
+	return completeDep{
+		workingConstraint: wc,
+		pl:                pl,
+	}
+}
+
+// A depspec is a fixture representing all the information a SourceManager would
+// ordinarily glean directly from interrogating a repository.
+type depspec struct {
+	n       ProjectRoot         // project root name
+	v       Version             // version this spec describes
+	deps    []ProjectConstraint // regular dependencies
+	devdeps []ProjectConstraint // test-only (dev) dependencies
+	pkgs    []tpkg              // per-package data; not populated by mkDepspec
+}
+
+// mkDepspec creates a depspec by processing a series of strings, each of which
+// contains an identiifer and version information.
+//
+// The first string is broken out into the name and version of the package being
+// described - see the docs on mkAtom for details. subsequent strings are
+// interpreted as dep constraints of that dep at that version. See the docs on
+// mkPDep for details.
+//
+// If a string other than the first includes a "(dev) " prefix, it will be
+// treated as a test-only dependency.
+func mkDepspec(pi string, deps ...string) depspec {
+	pa := mkAtom(pi)
+	if string(pa.id.ProjectRoot) != pa.id.NetworkName && pa.id.NetworkName != "" {
+		panic("alternate source on self makes no sense")
+	}
+
+	ds := depspec{
+		n: pa.id.ProjectRoot,
+		v: pa.v,
+	}
+
+	for _, dep := range deps {
+		var sl *[]ProjectConstraint
+		if strings.HasPrefix(dep, "(dev) ") {
+			dep = strings.TrimPrefix(dep, "(dev) ")
+			sl = &ds.devdeps
+		} else {
+			sl = &ds.deps
+		}
+
+		*sl = append(*sl, mkPCstrnt(dep))
+	}
+
+	return ds
+}
+
+// mkDep assembles a dependency: the depender atom is parsed from the first
+// string, and the constraint plus package list from the rest (see mkCDep).
+func mkDep(atom, pdep string, pl ...string) dependency {
+	return dependency{
+		depender: mkAtom(atom),
+		dep:      mkCDep(pdep, pl...),
+	}
+}
+
+// mkADep is like mkDep, but takes an already-constructed Constraint and uses
+// the pdep string only as the target project root.
+func mkADep(atom, pdep string, c Constraint, pl ...string) dependency {
+	return dependency{
+		depender: mkAtom(atom),
+		dep: completeDep{
+			workingConstraint: workingConstraint{
+				Ident: ProjectIdentifier{
+					ProjectRoot: ProjectRoot(pdep),
+				},
+				Constraint: c,
+			},
+			pl: pl,
+		},
+	}
+}
+
+// mkPI creates a ProjectIdentifier with the ProjectRoot as the provided
+// string, and the NetworkName unset.
+//
+// Call normalize() on the returned value if you need the NetworkName to be
+// equal to the ProjectRoot.
+func mkPI(root string) ProjectIdentifier {
+	return ProjectIdentifier{
+		ProjectRoot: ProjectRoot(root),
+	}
+}
+
+// mkSVC creates a new semver constraint, panicking if an error is returned.
+// Must-style helper, for test fixtures only.
+func mkSVC(body string) Constraint {
+	c, err := NewSemverConstraint(body)
+	if err != nil {
+		panic(fmt.Sprintf("Error while trying to create semver constraint from %s: %s", body, err.Error()))
+	}
+	return c
+}
+
+// mklock makes a fixLock, suitable to act as a lock file. Each pair string is
+// parsed with mkAtom and recorded as a LockedProject.
+func mklock(pairs ...string) fixLock {
+	l := fixLock{}
+	for _, s := range pairs {
+		pa := mkAtom(s)
+		l = append(l, NewLockedProject(pa.id, pa.v, nil))
+	}
+
+	return l
+}
+
+// mkrevlock makes a fixLock, suitable to act as a lock file, with only a name
+// and a rev.
+//
+// The type assertion panics if a parsed atom's version is not paired with an
+// underlying revision - deliberate, to reject bad test data.
+func mkrevlock(pairs ...string) fixLock {
+	l := make(fixLock, 0)
+	for _, s := range pairs {
+		pa := mkAtom(s)
+		l = append(l, NewLockedProject(pa.id, pa.v.(PairedVersion).Underlying(), nil))
+	}
+
+	return l
+}
+
+// mksolution builds an expected result set: a map from each parsed atom's
+// identifier to its version.
+func mksolution(pairs ...string) map[ProjectIdentifier]Version {
+	m := make(map[ProjectIdentifier]Version, len(pairs))
+	for _, pair := range pairs {
+		a := mkAtom(pair)
+		m[a.id] = a.v
+	}
+
+	return m
+}
+
+// computeBasicReachMap takes a depspec and computes a reach map which is
+// identical to the explicit depgraph.
+//
+// Using a reachMap here is overkill for what the basic fixtures actually need,
+// but we use it anyway for congruence with the more general cases.
+func computeBasicReachMap(ds []depspec) reachMap {
+	rm := make(reachMap)
+
+	for k, d := range ds {
+		n := string(d.n)
+		lm := map[string][]string{
+			n: nil,
+		}
+		v := d.v
+		if k == 0 {
+			// Put the root in with a nil rev, to accommodate the solver
+			v = nil
+		}
+		rm[pident{n: d.n, v: v}] = lm
+
+		// Every regular dep is directly reachable from the project itself.
+		for _, dep := range d.deps {
+			lm[n] = append(lm[n], string(dep.Ident.ProjectRoot))
+		}
+
+		// first is root; devdeps are reachable only from the root depspec
+		if k == 0 {
+			for _, dep := range d.devdeps {
+				lm[n] = append(lm[n], string(dep.Ident.ProjectRoot))
+			}
+		}
+	}
+
+	return rm
+}
+
+// pident pairs a project root with a version, for use as a reachMap key.
+type pident struct {
+	n ProjectRoot
+	v Version
+}
+
+// specfix is the interface implemented by all solver test fixtures, covering
+// both the basic and bimodal varieties.
+type specfix interface {
+	name() string
+	rootmanifest() RootManifest
+	rootTree() PackageTree
+	specs() []depspec
+	maxTries() int
+	solution() map[ProjectIdentifier]Version
+	failure() error
+}
+
+// A basicFixture is a declarative test fixture that can cover a wide variety of
+// solver cases. All cases, however, maintain one invariant: package == project.
+// There are no subpackages, and so it is impossible for them to trigger or
+// require bimodal solving.
+//
+// This type is separate from bimodalFixture in part for legacy reasons - many
+// of these were adapted from similar tests in dart's pub lib, where there is no
+// such thing as "bimodal solving".
+//
+// But it's also useful to keep them separate because bimodal solving involves
+// considerably more complexity than simple solving, both in terms of fixture
+// declaration and actual solving mechanics. Thus, we gain a lot of value for
+// contributors and maintainers by keeping comprehension costs relatively low
+// while still covering important cases.
+type basicFixture struct {
+	// name of this fixture datum
+	n string
+	// depspecs. always treat first as root
+	ds []depspec
+	// results; map of name/version pairs
+	r map[ProjectIdentifier]Version
+	// max attempts the solver should need to find solution. 0 means no limit
+	maxAttempts int
+	// Use downgrade instead of default upgrade sorter
+	downgrade bool
+	// lock file simulator, if one's to be used at all
+	l fixLock
+	// solve failure expected, if any
+	fail error
+	// overrides, if any
+	ovr ProjectConstraints
+	// request up/downgrade to all projects
+	changeall bool
+}
+
+// name returns the fixture's descriptive name.
+func (f basicFixture) name() string {
+	return f.n
+}
+
+// specs returns the fixture's depspecs; the first is always the root.
+func (f basicFixture) specs() []depspec {
+	return f.ds
+}
+
+// maxTries returns the max solver attempts allowed; 0 means no limit.
+func (f basicFixture) maxTries() int {
+	return f.maxAttempts
+}
+
+// solution returns the expected name/version result set.
+func (f basicFixture) solution() map[ProjectIdentifier]Version {
+	return f.r
+}
+
+// rootmanifest assembles a RootManifest from the root depspec's constraints,
+// test constraints, and any overrides.
+func (f basicFixture) rootmanifest() RootManifest {
+	return simpleRootManifest{
+		c:   f.ds[0].deps,
+		tc:  f.ds[0].devdeps,
+		ovr: f.ovr,
+	}
+}
+
+// rootTree synthesizes a single-package PackageTree for the root depspec,
+// with its deps as imports and devdeps as test imports.
+func (f basicFixture) rootTree() PackageTree {
+	var imp, timp []string
+	for _, dep := range f.ds[0].deps {
+		imp = append(imp, string(dep.Ident.ProjectRoot))
+	}
+	for _, dep := range f.ds[0].devdeps {
+		timp = append(timp, string(dep.Ident.ProjectRoot))
+	}
+
+	n := string(f.ds[0].n)
+	pt := PackageTree{
+		ImportRoot: n,
+		Packages: map[string]PackageOrErr{
+			string(n): {
+				P: Package{
+					ImportPath:  n,
+					Name:        n,
+					Imports:     imp,
+					TestImports: timp,
+				},
+			},
+		},
+	}
+
+	return pt
+}
+
+// failure returns the expected solve failure, if any.
+func (f basicFixture) failure() error {
+	return f.fail
+}
+
+// A table of basicFixtures, used in the basic solving test set.
+var basicFixtures = map[string]basicFixture{
+	// basic fixtures
+	"no dependencies": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0"),
+		},
+		r: mksolution(),
+	},
+	"simple dependency tree": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 1.0.0", "aa 1.0.0", "ab 1.0.0"),
+			mkDepspec("aa 1.0.0"),
+			mkDepspec("ab 1.0.0"),
+			mkDepspec("b 1.0.0", "ba 1.0.0", "bb 1.0.0"),
+			mkDepspec("ba 1.0.0"),
+			mkDepspec("bb 1.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"aa 1.0.0",
+			"ab 1.0.0",
+			"b 1.0.0",
+			"ba 1.0.0",
+			"bb 1.0.0",
+		),
+	},
+	"shared dependency with overlapping constraints": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 1.0.0", "shared >=2.0.0, <4.0.0"),
+			mkDepspec("b 1.0.0", "shared >=3.0.0, <5.0.0"),
+			mkDepspec("shared 2.0.0"),
+			mkDepspec("shared 3.0.0"),
+			mkDepspec("shared 3.6.9"),
+			mkDepspec("shared 4.0.0"),
+			mkDepspec("shared 5.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+			"shared 3.6.9",
+		),
+	},
+	"downgrade on overlapping constraints": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 1.0.0", "shared >=2.0.0, <=4.0.0"),
+			mkDepspec("b 1.0.0", "shared >=3.0.0, <5.0.0"),
+			mkDepspec("shared 2.0.0"),
+			mkDepspec("shared 3.0.0"),
+			mkDepspec("shared 3.6.9"),
+			mkDepspec("shared 4.0.0"),
+			mkDepspec("shared 5.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+			"shared 3.0.0",
+		),
+		downgrade: true,
+	},
+	"shared dependency where dependent version in turn affects other dependencies": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo <=1.0.2", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 1.0.1", "bang 1.0.0"),
+			mkDepspec("foo 1.0.2", "whoop 1.0.0"),
+			mkDepspec("foo 1.0.3", "zoop 1.0.0"),
+			mkDepspec("bar 1.0.0", "foo <=1.0.1"),
+			mkDepspec("bang 1.0.0"),
+			mkDepspec("whoop 1.0.0"),
+			mkDepspec("zoop 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.1",
+			"bar 1.0.0",
+			"bang 1.0.0",
+		),
+	},
+	"removed dependency": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "foo 1.0.0", "bar *"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("foo 2.0.0"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 2.0.0", "baz 1.0.0"),
+			mkDepspec("baz 1.0.0", "foo 2.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+		maxAttempts: 2,
+	},
+	// fixtures with locks
+	"with compatible locked dependency": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.1",
+			"bar 1.0.1",
+		),
+	},
+	"upgrade through lock": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+		),
+		changeall: true,
+	},
+	"downgrade through lock": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+		changeall: true,
+		downgrade: true,
+	},
+	"with incompatible locked dependency": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo >1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+		),
+	},
+	"with unrelated locked dependency": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+			mkDepspec("baz 1.0.0 bazrev"),
+		},
+		l: mklock(
+			"baz 1.0.0 bazrev",
+		),
+		r: mksolution(
+			"foo 1.0.2",
+			"bar 1.0.2",
+		),
+	},
+	"unlocks dependencies if necessary to ensure that a new dependency is satisfied": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "newdep *"),
+			mkDepspec("foo 1.0.0 foorev", "bar <2.0.0"),
+			mkDepspec("bar 1.0.0 barrev", "baz <2.0.0"),
+			mkDepspec("baz 1.0.0 bazrev", "qux <2.0.0"),
+			mkDepspec("qux 1.0.0 quxrev"),
+			mkDepspec("foo 2.0.0", "bar <3.0.0"),
+			mkDepspec("bar 2.0.0", "baz <3.0.0"),
+			mkDepspec("baz 2.0.0", "qux <3.0.0"),
+			mkDepspec("qux 2.0.0"),
+			mkDepspec("newdep 2.0.0", "baz >=1.5.0"),
+		},
+		l: mklock(
+			"foo 1.0.0 foorev",
+			"bar 1.0.0 barrev",
+			"baz 1.0.0 bazrev",
+			"qux 1.0.0 quxrev",
+		),
+		r: mksolution(
+			"foo 2.0.0",
+			"bar 2.0.0",
+			"baz 2.0.0",
+			"qux 1.0.0 quxrev",
+			"newdep 2.0.0",
+		),
+		maxAttempts: 4,
+	},
+	"locked atoms are matched on both local and net name": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0 foorev"),
+			mkDepspec("foo 2.0.0 foorev2"),
+		},
+		l: mklock(
+			"foo from baz 1.0.0 foorev",
+		),
+		r: mksolution(
+			"foo 2.0.0 foorev2",
+		),
+	},
+	"pairs bare revs in lock with versions": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo ~1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mkrevlock(
+			"foo 1.0.1 foorev", // mkrevlock drops the 1.0.1
+		),
+		r: mksolution(
+			"foo 1.0.1 foorev",
+			"bar 1.0.1",
+		),
+	},
+	// This fixture describes a situation that should be impossible with a
+	// real-world VCS (contents of dep at same rev are different, as indicated
+	// by different constraints on bar). But, that's not the SUT here, so it's
+	// OK.
+	"pairs bare revs in lock with all versions": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo ~1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2 foorev", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mkrevlock(
+			"foo 1.0.1 foorev", // mkrevlock drops the 1.0.1
+		),
+		r: mksolution(
+			"foo 1.0.2 foorev",
+			"bar 1.0.2",
+		),
+	},
+	"does not pair bare revs in manifest with unpaired lock version": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo ~1.0.1"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mkrevlock(
+			"foo 1.0.1 foorev", // mkrevlock drops the 1.0.1
+		),
+		r: mksolution(
+			"foo 1.0.1 foorev",
+			"bar 1.0.1",
+		),
+	},
+	"lock to branch on old rev keeps old rev": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo bmaster"),
+			mkDepspec("foo bmaster newrev"),
+		},
+		l: mklock(
+			"foo bmaster oldrev",
+		),
+		r: mksolution(
+			"foo bmaster oldrev",
+		),
+	},
+	// Whereas this is a normal situation for a branch, when it occurs for a
+	// tag, it means someone's been naughty upstream. Still, though, the outcome
+	// is the same.
+	//
+	// TODO(sdboyer) this needs to generate a warning, once we start doing that
+	"lock to now-moved tag on old rev keeps old rev": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo ptaggerino"),
+			mkDepspec("foo ptaggerino newrev"),
+		},
+		l: mklock(
+			"foo ptaggerino oldrev",
+		),
+		r: mksolution(
+			"foo ptaggerino oldrev",
+		),
+	},
+	"includes root package's dev dependencies": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "(dev) foo 1.0.0", "(dev) bar 1.0.0"),
+			mkDepspec("foo 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+	},
+	"includes dev dependency's transitive dependencies": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "(dev) foo 1.0.0"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+		),
+	},
+	"ignores transitive dependency's dev dependencies": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "(dev) foo 1.0.0"),
+			mkDepspec("foo 1.0.0", "(dev) bar 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+		),
+	},
+	"no version that matches requirement": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo >=1.0.0, <2.0.0"),
+			mkDepspec("foo 2.0.0"),
+			mkDepspec("foo 2.1.3"),
+		},
+		fail: &noVersionError{
+			pn: mkPI("foo"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("2.1.3"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("foo 2.1.3"),
+						failparent: []dependency{mkDep("root", "foo ^1.0.0", "foo")},
+						c:          mkSVC("^1.0.0"),
+					},
+				},
+				{
+					v: NewVersion("2.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("foo 2.0.0"),
+						failparent: []dependency{mkDep("root", "foo ^1.0.0", "foo")},
+						c:          mkSVC("^1.0.0"),
+					},
+				},
+			},
+		},
+	},
+	"no version that matches combined constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0", "shared >=2.0.0, <3.0.0"),
+			mkDepspec("bar 1.0.0", "shared >=2.9.0, <4.0.0"),
+			mkDepspec("shared 2.5.0"),
+			mkDepspec("shared 3.5.0"),
+		},
+		fail: &noVersionError{
+			pn: mkPI("shared"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("3.5.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("shared 3.5.0"),
+						failparent: []dependency{mkDep("foo 1.0.0", "shared >=2.0.0, <3.0.0", "shared")},
+						c:          mkSVC(">=2.9.0, <3.0.0"),
+					},
+				},
+				{
+					v: NewVersion("2.5.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("shared 2.5.0"),
+						failparent: []dependency{mkDep("bar 1.0.0", "shared >=2.9.0, <4.0.0", "shared")},
+						c:          mkSVC(">=2.9.0, <3.0.0"),
+					},
+				},
+			},
+		},
+	},
+	"disjoint constraints": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0", "shared <=2.0.0"),
+			mkDepspec("bar 1.0.0", "shared >3.0.0"),
+			mkDepspec("shared 2.0.0"),
+			mkDepspec("shared 4.0.0"),
+		},
+		fail: &noVersionError{
+			pn: mkPI("foo"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &disjointConstraintFailure{
+						goal:      mkDep("foo 1.0.0", "shared <=2.0.0", "shared"),
+						failsib:   []dependency{mkDep("bar 1.0.0", "shared >3.0.0", "shared")},
+						nofailsib: nil,
+						c:         mkSVC(">3.0.0"),
+					},
+				},
+			},
+		},
+	},
+	"no valid solution": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 2.0.0", "b 2.0.0"),
+			mkDepspec("b 1.0.0", "a 2.0.0"),
+			mkDepspec("b 2.0.0", "a 1.0.0"),
+		},
+		fail: &noVersionError{
+			pn: mkPI("b"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("2.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("b 2.0.0"),
+						failparent: []dependency{mkDep("a 1.0.0", "b 1.0.0", "b")},
+						c:          mkSVC("1.0.0"),
+					},
+				},
+				{
+					v: NewVersion("1.0.0"),
+					f: &constraintNotAllowedFailure{
+						goal: mkDep("b 1.0.0", "a 2.0.0", "a"),
+						v:    NewVersion("1.0.0"),
+					},
+				},
+			},
+		},
+	},
+	"no version that matches while backtracking": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b >1.0.0"),
+			mkDepspec("a 1.0.0"),
+			mkDepspec("b 1.0.0"),
+		},
+		fail: &noVersionError{
+			pn: mkPI("b"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("b 1.0.0"),
+						failparent: []dependency{mkDep("root", "b >1.0.0", "b")},
+						c:          mkSVC(">1.0.0"),
+					},
+				},
+			},
+		},
+	},
+	// The latest versions of a and b disagree on c. An older version of either
+	// will resolve the problem. This test validates that b, which is farther
+	// in the dependency graph from myapp is downgraded first.
+	"rolls back leaf versions first": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *"),
+			mkDepspec("a 1.0.0", "b *"),
+			mkDepspec("a 2.0.0", "b *", "c 2.0.0"),
+			mkDepspec("b 1.0.0"),
+			mkDepspec("b 2.0.0", "c 1.0.0"),
+			mkDepspec("c 1.0.0"),
+			mkDepspec("c 2.0.0"),
+		},
+		r: mksolution(
+			"a 2.0.0",
+			"b 1.0.0",
+			"c 2.0.0",
+		),
+		maxAttempts: 2,
+	},
+	// Only one version of baz, so foo and bar will have to downgrade until they
+	// reach it.
+	"mutual downgrading": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"),
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 2.0.0", "bar 2.0.0"),
+			mkDepspec("foo 3.0.0", "bar 3.0.0"),
+			mkDepspec("bar 1.0.0", "baz *"),
+			mkDepspec("bar 2.0.0", "baz 2.0.0"),
+			mkDepspec("bar 3.0.0", "baz 3.0.0"),
+			mkDepspec("baz 1.0.0"),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 1.0.0",
+			"baz 1.0.0",
+		),
+		maxAttempts: 3,
+	},
+	// Ensures the solver doesn't exhaustively search all versions of b when
+	// it's a-2.0.0 whose dependency on c-2.0.0-nonexistent led to the
+	// problem. We make sure b has more versions than a so that the solver
+	// tries a first since it sorts sibling dependencies by number of
+	// versions.
+	"search real failer": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "c 1.0.0"),
+			mkDepspec("a 2.0.0", "c 2.0.0"),
+			mkDepspec("b 1.0.0"),
+			mkDepspec("b 2.0.0"),
+			mkDepspec("b 3.0.0"),
+			mkDepspec("c 1.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 3.0.0",
+			"c 1.0.0",
+		),
+		maxAttempts: 2,
+	},
+	// Dependencies are ordered so that packages with fewer versions are tried
+	// first. Here, there are two valid solutions (either a or b must be
+	// downgraded once). The chosen one depends on which dep is traversed first.
+	// Since b has fewer versions, it will be traversed first, which means a
+	// will come later. Since later selections are revised first, a gets
+	// downgraded.
+	"traverse into package with fewer versions first": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "c *"),
+			mkDepspec("a 2.0.0", "c *"),
+			mkDepspec("a 3.0.0", "c *"),
+			mkDepspec("a 4.0.0", "c *"),
+			mkDepspec("a 5.0.0", "c 1.0.0"),
+			mkDepspec("b 1.0.0", "c *"),
+			mkDepspec("b 2.0.0", "c *"),
+			mkDepspec("b 3.0.0", "c *"),
+			mkDepspec("b 4.0.0", "c 2.0.0"),
+			mkDepspec("c 1.0.0"),
+			mkDepspec("c 2.0.0"),
+		},
+		r: mksolution(
+			"a 4.0.0",
+			"b 4.0.0",
+			"c 2.0.0",
+		),
+		maxAttempts: 2,
+	},
+	// This is similar to the preceding fixture. When getting the number of
+	// versions of a package to determine which to traverse first, versions that
+	// are disallowed by the root package's constraints should not be
+	// considered. Here, foo has more versions than bar in total (4), but fewer
+	// that meet myapp's constraints (only 2). There is no solution, but we will
+	// do less backtracking if foo is tested first.
+	"root constraints pre-eliminate versions": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *"),
+			mkDepspec("foo 1.0.0", "none 2.0.0"),
+			mkDepspec("foo 2.0.0", "none 2.0.0"),
+			mkDepspec("foo 3.0.0", "none 2.0.0"),
+			mkDepspec("foo 4.0.0", "none 2.0.0"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 2.0.0"),
+			mkDepspec("bar 3.0.0"),
+			mkDepspec("none 1.0.0"),
+		},
+		fail: &noVersionError{
+			pn: mkPI("none"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &versionNotAllowedFailure{
+						goal:       mkAtom("none 1.0.0"),
+						failparent: []dependency{mkDep("foo 1.0.0", "none 2.0.0", "none")},
+						c:          mkSVC("2.0.0"),
+					},
+				},
+			},
+		},
+	},
+	// If there's a disjoint constraint on a package, then selecting other
+	// versions of it is a waste of time: no possible versions can match. We
+	// need to jump past it to the most recent package that affected the
+	// constraint.
+	"backjump past failed package on disjoint constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "foo *"),
+			mkDepspec("a 1.0.0", "foo *"),
+			mkDepspec("a 2.0.0", "foo <1.0.0"),
+			mkDepspec("foo 2.0.0"),
+			mkDepspec("foo 2.0.1"),
+			mkDepspec("foo 2.0.2"),
+			mkDepspec("foo 2.0.3"),
+			mkDepspec("foo 2.0.4"),
+			mkDepspec("none 1.0.0"),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"foo 2.0.4",
+		),
+		maxAttempts: 2,
+	},
+	// Revision enters vqueue if a dep has a constraint on that revision
+	"revision injected into vqueue": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo r123abc"),
+			mkDepspec("foo r123abc"),
+			mkDepspec("foo 1.0.0 foorev"),
+			mkDepspec("foo 2.0.0 foorev2"),
+		},
+		r: mksolution(
+			"foo r123abc",
+		),
+	},
+	// Some basic override checks
+	"override root's own constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *", "b *"),
+			mkDepspec("a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 2.0.0", "b 1.0.0"),
+			mkDepspec("b 1.0.0"),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("a"): ProjectProperties{
+				Constraint: NewVersion("1.0.0"),
+			},
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	"override dep's constraint": {
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "a *"),
+			mkDepspec("a 1.0.0", "b 1.0.0"),
+			mkDepspec("a 2.0.0", "b 1.0.0"),
+			mkDepspec("b 1.0.0"),
+			mkDepspec("b 2.0.0"),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("b"): ProjectProperties{
+				Constraint: NewVersion("2.0.0"),
+			},
+		},
+		r: mksolution(
+			"a 2.0.0",
+			"b 2.0.0",
+		),
+	},
+	"overridden mismatched net addrs, alt in dep, back to default": {
+		ds: []depspec{
+			mkDepspec("root 1.0.0", "foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.0", "bar from baz 1.0.0"),
+			mkDepspec("bar 1.0.0"),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("bar"): ProjectProperties{
+				NetworkName: "bar",
+			},
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar from bar 1.0.0",
+		),
+	},
+
+	// TODO(sdboyer) decide how to refactor the solver in order to re-enable these.
+	// Checking for revision existence is important...but kinda obnoxious.
+	//{
+	//// Solve fails if revision constraint calls for a nonexistent revision
+	//n: "fail on missing revision",
+	//ds: []depspec{
+	//mkDepspec("root 0.0.0", "bar *"),
+	//mkDepspec("bar 1.0.0", "foo r123abc"),
+	//mkDepspec("foo r123nomatch"),
+	//mkDepspec("foo 1.0.0"),
+	//mkDepspec("foo 2.0.0"),
+	//},
+	//errp: []string{"bar", "foo", "bar"},
+	//},
+	//{
+	//// Solve fails if revision constraint calls for a nonexistent revision,
+	//// even if rev constraint is specified by root
+	//n: "fail on missing revision from root",
+	//ds: []depspec{
+	//mkDepspec("root 0.0.0", "foo r123nomatch"),
+	//mkDepspec("foo r123abc"),
+	//mkDepspec("foo 1.0.0"),
+	//mkDepspec("foo 2.0.0"),
+	//},
+	//errp: []string{"foo", "root", "foo"},
+	//},
+
+	// TODO(sdboyer) add fixture that tests proper handling of loops via aliases (where
+	// a project that wouldn't be a loop is aliased to a project that is a loop)
+}
+
+func init() {
+	// This sets up a hundred versions of foo and bar, 0.0.0 through 9.9.0. Each
+	// version of foo depends on a baz with the same major version. Each version
+	// of bar depends on a baz with the same minor version. There is only one
+	// version of baz, 0.0.0, so only older versions of foo and bar will
+	// satisfy it.
+	fix := basicFixture{
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *", "bar *"),
+			mkDepspec("baz 0.0.0"),
+		},
+		r: mksolution(
+			"foo 0.9.0",
+			"bar 9.0.0",
+			"baz 0.0.0",
+		),
+		maxAttempts: 10,
+	}
+
+	for i := 0; i < 10; i++ {
+		for j := 0; j < 10; j++ {
+			fix.ds = append(fix.ds, mkDepspec(fmt.Sprintf("foo %v.%v.0", i, j), fmt.Sprintf("baz %v.0.0", i)))
+			fix.ds = append(fix.ds, mkDepspec(fmt.Sprintf("bar %v.%v.0", i, j), fmt.Sprintf("baz 0.%v.0", j)))
+		}
+	}
+
+	basicFixtures["complex backtrack"] = fix
+
+	for k, fix := range basicFixtures {
+		// Assign the name into the fixture itself
+		fix.n = k
+		basicFixtures[k] = fix
+	}
+}
+
+// reachMaps contain externalReach()-type data for a given depspec fixture's
+// universe of projects, packages, and versions.
+type reachMap map[pident]map[string][]string
+
+type depspecSourceManager struct {
+	specs []depspec
+	rm    reachMap
+	ig    map[string]bool
+}
+
+type fixSM interface {
+	SourceManager
+	rootSpec() depspec
+	allSpecs() []depspec
+	ignore() map[string]bool
+}
+
+var _ fixSM = &depspecSourceManager{}
+
+func newdepspecSM(ds []depspec, ignore []string) *depspecSourceManager {
+	ig := make(map[string]bool)
+	if len(ignore) > 0 {
+		for _, pkg := range ignore {
+			ig[pkg] = true
+		}
+	}
+
+	return &depspecSourceManager{
+		specs: ds,
+		rm:    computeBasicReachMap(ds),
+		ig:    ig,
+	}
+}
+
+func (sm *depspecSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
+	// If the input version is a PairedVersion, look only at its top version,
+	// not the underlying. This is generally consistent with the idea that, for
+	// this class of lookup, the rev probably DOES exist, but upstream changed
+	// it (typically a branch). For the purposes of tests, then, that's an OK
+	// scenario, because otherwise we'd have to enumerate all the revs in the
+	// fixture declarations, which would screw up other things.
+	if pv, ok := v.(PairedVersion); ok {
+		v = pv.Unpair()
+	}
+
+	for _, ds := range sm.specs {
+		if id.netName() == string(ds.n) && v.Matches(ds.v) {
+			return ds, dummyLock{}, nil
+		}
+	}
+
+	// TODO(sdboyer) proper solver-type errors
+	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v)
+}
+
+func (sm *depspecSourceManager) AnalyzerInfo() (string, *semver.Version) {
+	return "depspec-sm-builtin", sv("v1.0.0")
+}
+
+func (sm *depspecSourceManager) ExternalReach(id ProjectIdentifier, v Version) (map[string][]string, error) {
+	pid := pident{n: ProjectRoot(id.netName()), v: v}
+	if m, exists := sm.rm[pid]; exists {
+		return m, nil
+	}
+	return nil, fmt.Errorf("No reach data for %s at version %s", id.errString(), v)
+}
+
+func (sm *depspecSourceManager) ListExternal(id ProjectIdentifier, v Version) ([]string, error) {
+	// This should only be called for the root
+	pid := pident{n: ProjectRoot(id.netName()), v: v}
+	if r, exists := sm.rm[pid]; exists {
+		return r[string(id.ProjectRoot)], nil
+	}
+	return nil, fmt.Errorf("No reach data for %s at version %s", id.errString(), v)
+}
+
+func (sm *depspecSourceManager) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	pid := pident{n: ProjectRoot(id.netName()), v: v}
+
+	if r, exists := sm.rm[pid]; exists {
+		return PackageTree{
+			ImportRoot: string(pid.n),
+			Packages: map[string]PackageOrErr{
+				string(pid.n): {
+					P: Package{
+						ImportPath: string(pid.n),
+						Name:       string(pid.n),
+						Imports:    r[string(pid.n)],
+					},
+				},
+			},
+		}, nil
+	}
+
+	// if incoming version was paired, walk the map and search for a match on
+	// top-only version
+	if pv, ok := v.(PairedVersion); ok {
+		uv := pv.Unpair()
+		for pid, r := range sm.rm {
+			if uv.Matches(pid.v) {
+				return PackageTree{
+					ImportRoot: string(pid.n),
+					Packages: map[string]PackageOrErr{
+						string(pid.n): {
+							P: Package{
+								ImportPath: string(pid.n),
+								Name:       string(pid.n),
+								Imports:    r[string(pid.n)],
+							},
+						},
+					},
+				}, nil
+			}
+		}
+	}
+
+	return PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", pid.n, v)
+}
+
+func (sm *depspecSourceManager) ListVersions(id ProjectIdentifier) (pi []Version, err error) {
+	for _, ds := range sm.specs {
+		// To simulate the behavior of the real SourceManager, we do not return
+		// revisions from ListVersions().
+		if _, isrev := ds.v.(Revision); !isrev && id.netName() == string(ds.n) {
+			pi = append(pi, ds.v)
+		}
+	}
+
+	if len(pi) == 0 {
+		err = fmt.Errorf("Project %s could not be found", id.errString())
+	}
+
+	return
+}
+
+func (sm *depspecSourceManager) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
+	for _, ds := range sm.specs {
+		if id.netName() == string(ds.n) && r == ds.v {
+			return true, nil
+		}
+	}
+
+	return false, fmt.Errorf("Project %s has no revision %s", id.errString(), r)
+}
+
+func (sm *depspecSourceManager) SourceExists(id ProjectIdentifier) (bool, error) {
+	for _, ds := range sm.specs {
+		if id.netName() == string(ds.n) {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
+func (sm *depspecSourceManager) SyncSourceFor(id ProjectIdentifier) error {
+	// Ignore err because it can't happen
+	if exist, _ := sm.SourceExists(id); !exist {
+		return fmt.Errorf("Source %s does not exist", id.errString())
+	}
+	return nil
+}
+
+func (sm *depspecSourceManager) Release() {}
+
+func (sm *depspecSourceManager) ExportProject(id ProjectIdentifier, v Version, to string) error {
+	return fmt.Errorf("dummy sm doesn't support exporting")
+}
+
+func (sm *depspecSourceManager) DeduceProjectRoot(ip string) (ProjectRoot, error) {
+	for _, ds := range sm.allSpecs() {
+		n := string(ds.n)
+		if ip == n || strings.HasPrefix(ip, n+"/") {
+			return ProjectRoot(n), nil
+		}
+	}
+	return "", fmt.Errorf("Could not find %s, or any parent, in list of known fixtures", ip)
+}
+
+func (sm *depspecSourceManager) rootSpec() depspec {
+	return sm.specs[0]
+}
+
+func (sm *depspecSourceManager) allSpecs() []depspec {
+	return sm.specs
+}
+
+func (sm *depspecSourceManager) ignore() map[string]bool {
+	return sm.ig
+}
+
+type depspecBridge struct {
+	*bridge
+}
+
+// override verifyRoot() on bridge to prevent any filesystem interaction
+func (b *depspecBridge) verifyRootDir(path string) error {
+	root := b.sm.(fixSM).rootSpec()
+	if string(root.n) != path {
+		return fmt.Errorf("Expected only root project %q to verifyRootDir(), got %q", root.n, path)
+	}
+
+	return nil
+}
+
+func (b *depspecBridge) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	return b.sm.(fixSM).ListPackages(id, v)
+}
+
+func (sm *depspecBridge) vendorCodeExists(id ProjectIdentifier) (bool, error) {
+	return false, nil
+}
+
+// enforce interfaces
+var _ Manifest = depspec{}
+var _ Lock = dummyLock{}
+var _ Lock = fixLock{}
+
+// impl Spec interface
+func (ds depspec) DependencyConstraints() []ProjectConstraint {
+	return ds.deps
+}
+
+// impl Spec interface
+func (ds depspec) TestDependencyConstraints() []ProjectConstraint {
+	return ds.devdeps
+}
+
+type fixLock []LockedProject
+
+func (fixLock) SolverVersion() string {
+	return "-1"
+}
+
+// impl Lock interface
+func (fixLock) InputHash() []byte {
+	return []byte("fooooorooooofooorooofoo")
+}
+
+// impl Lock interface
+func (l fixLock) Projects() []LockedProject {
+	return l
+}
+
+type dummyLock struct{}
+
+// impl Lock interface
+func (dummyLock) SolverVersion() string {
+	return "-1"
+}
+
+// impl Lock interface
+func (dummyLock) InputHash() []byte {
+	return []byte("fooooorooooofooorooofoo")
+}
+
+// impl Lock interface
+func (dummyLock) Projects() []LockedProject {
+	return nil
+}
+
+// We've borrowed this bestiary from pub's tests:
+// https://github.com/dart-lang/pub/blob/master/test/version_solver_test.dart
+
+// TODO(sdboyer) finish converting all of these
+
+/*
+func basicGraph() {
+  testResolve("circular dependency", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "bar": "1.0.0"
+    },
+    "bar 1.0.0": {
+      "foo": "1.0.0"
+    }
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0",
+    "bar": "1.0.0"
+  });
+
+}
+
+func withLockFile() {
+
+}
+
+func rootDependency() {
+  testResolve("with root source", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "myapp from root": ">=1.0.0"
+    }
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0"
+  });
+
+  testResolve("with different source", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "myapp": ">=1.0.0"
+    }
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0"
+  });
+
+  testResolve("with wrong version", {
+    "myapp 1.0.0": {
+      "foo": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "myapp": "<1.0.0"
+    }
+  }, error: couldNotSolve);
+}
+
+func unsolvable() {
+
+  testResolve("mismatched descriptions", {
+    "myapp 0.0.0": {
+      "foo": "1.0.0",
+      "bar": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "shared-x": "1.0.0"
+    },
+    "bar 1.0.0": {
+      "shared-y": "1.0.0"
+    },
+    "shared-x 1.0.0": {},
+    "shared-y 1.0.0": {}
+  }, error: descriptionMismatch("shared", "foo", "bar"));
+
+  testResolve("mismatched sources", {
+    "myapp 0.0.0": {
+      "foo": "1.0.0",
+      "bar": "1.0.0"
+    },
+    "foo 1.0.0": {
+      "shared": "1.0.0"
+    },
+    "bar 1.0.0": {
+      "shared from mock2": "1.0.0"
+    },
+    "shared 1.0.0": {},
+    "shared 1.0.0 from mock2": {}
+  }, error: sourceMismatch("shared", "foo", "bar"));
+
+
+
+  // This is a regression test for #18300.
+  testResolve("...", {
+    "myapp 0.0.0": {
+      "angular": "any",
+      "collection": "any"
+    },
+    "analyzer 0.12.2": {},
+    "angular 0.10.0": {
+      "di": ">=0.0.32 <0.1.0",
+      "collection": ">=0.9.1 <1.0.0"
+    },
+    "angular 0.9.11": {
+      "di": ">=0.0.32 <0.1.0",
+      "collection": ">=0.9.1 <1.0.0"
+    },
+    "angular 0.9.10": {
+      "di": ">=0.0.32 <0.1.0",
+      "collection": ">=0.9.1 <1.0.0"
+    },
+    "collection 0.9.0": {},
+    "collection 0.9.1": {},
+    "di 0.0.37": {"analyzer": ">=0.13.0 <0.14.0"},
+    "di 0.0.36": {"analyzer": ">=0.13.0 <0.14.0"}
+  }, error: noVersion(["analyzer", "di"]), maxTries: 2);
+}
+
+func badSource() {
+  testResolve("fail if the root package has a bad source in dep", {
+    "myapp 0.0.0": {
+      "foo from bad": "any"
+    },
+  }, error: unknownSource("myapp", "foo", "bad"));
+
+  testResolve("fail if the root package has a bad source in dev dep", {
+    "myapp 0.0.0": {
+      "(dev) foo from bad": "any"
+    },
+  }, error: unknownSource("myapp", "foo", "bad"));
+
+  testResolve("fail if all versions have bad source in dep", {
+    "myapp 0.0.0": {
+      "foo": "any"
+    },
+    "foo 1.0.0": {
+      "bar from bad": "any"
+    },
+    "foo 1.0.1": {
+      "baz from bad": "any"
+    },
+    "foo 1.0.3": {
+      "bang from bad": "any"
+    },
+  }, error: unknownSource("foo", "bar", "bad"), maxTries: 3);
+
+  testResolve("ignore versions with bad source in dep", {
+    "myapp 1.0.0": {
+      "foo": "any"
+    },
+    "foo 1.0.0": {
+      "bar": "any"
+    },
+    "foo 1.0.1": {
+      "bar from bad": "any"
+    },
+    "foo 1.0.3": {
+      "bar from bad": "any"
+    },
+    "bar 1.0.0": {}
+  }, result: {
+    "myapp from root": "1.0.0",
+    "foo": "1.0.0",
+    "bar": "1.0.0"
+  }, maxTries: 3);
+}
+
+func backtracking() {
+  testResolve("circular dependency on older version", {
+    "myapp 0.0.0": {
+      "a": ">=1.0.0"
+    },
+    "a 1.0.0": {},
+    "a 2.0.0": {
+      "b": "1.0.0"
+    },
+    "b 1.0.0": {
+      "a": "1.0.0"
+    }
+  }, result: {
+    "myapp from root": "0.0.0",
+    "a": "1.0.0"
+  }, maxTries: 2);
+}
+*/
diff --git a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
new file mode 100644
index 0000000..f430ad9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go
@@ -0,0 +1,936 @@
+package gps
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+)
+
+// dsp - "depspec with packages"
+//
+// Wraps a set of tpkgs onto a depspec, and returns it.
+func dsp(ds depspec, pkgs ...tpkg) depspec {
+	ds.pkgs = pkgs
+	return ds
+}
+
+// pkg makes a tpkg appropriate for use in bimodal testing
+func pkg(path string, imports ...string) tpkg {
+	return tpkg{
+		path:    path,
+		imports: imports,
+	}
+}
+
+func init() {
+	for k, fix := range bimodalFixtures {
+		// Assign the name into the fixture itself
+		fix.n = k
+		bimodalFixtures[k] = fix
+	}
+}
+
+// Fixtures that rely on simulated bimodal (project and package-level)
+// analysis for correct operation. The name given in the map gets assigned into
+// the fixture itself in init().
+var bimodalFixtures = map[string]bimodalFixture{
+	// Simple case, ensures that we do the very basics of picking up and
+	// including a single, simple import that is not expressed as a constraint
+	"simple bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a")),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Ensure it works when the import jump is not from the package with the
+	// same path as root, but from a subpkg
+	"subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// The same, but with a jump through two subpkgs
+	"double-subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "root/bar"),
+				pkg("root/bar", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Same again, but now nest the subpkgs
+	"double nested subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "root/foo/bar"),
+				pkg("root/foo/bar", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Importing package from project with no root package
+	"bm-add on project with no pkg in root dir": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a/foo")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a/foo")),
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Import jump is in a dep, and points to a transitive dep
+	"transitive bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Constraints apply only if the project that declares them has a
+	// reachable import
+	"constraints activated by import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "b 1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+			dsp(mkDepspec("b 1.1.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.1.0",
+		),
+	},
+	// Import jump is in a dep, and points to a transitive dep - but only in not
+	// the first version we try
+	"transitive bm-add on older version": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a ~1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+			),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Import jump is in a dep, and points to a transitive dep - but will only
+	// get there via backtracking
+	"backtrack to dep on bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a", "b"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "c"),
+			),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a"),
+			),
+			// Include two versions of b, otherwise it'll be selected first
+			dsp(mkDepspec("b 0.9.0"),
+				pkg("b", "c"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b", "c"),
+			),
+			dsp(mkDepspec("c 1.0.0", "a 1.0.0"),
+				pkg("c", "a"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+			"c 1.0.0",
+		),
+	},
+	// Import jump is in a dep subpkg, and points to a transitive dep
+	"transitive subpkg bm-add": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Import jump is in a dep subpkg, pointing to a transitive dep, but only in
+	// not the first version we try
+	"transitive subpkg bm-add on older version": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a ~1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar", "b"),
+			),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0",
+		),
+	},
+	// Ensure that if a constraint is expressed, but no actual import exists,
+	// then the constraint is disregarded - the project named in the constraint
+	// is not part of the solution.
+	"ignore constraint without import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a 1.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		r: mksolution(),
+	},
+	// Transitive deps from one project (a) get incrementally included as other
+	// deps incorporate its various packages.
+	"multi-stage pkg incorporation": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "d"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+				pkg("a/second", "c"),
+			),
+			dsp(mkDepspec("b 2.0.0"),
+				pkg("b"),
+			),
+			dsp(mkDepspec("c 1.2.0"),
+				pkg("c"),
+			),
+			dsp(mkDepspec("d 1.0.0"),
+				pkg("d", "a/second"),
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 2.0.0",
+			"c 1.2.0",
+			"d 1.0.0",
+		),
+	},
+	// Regression - make sure that the constraint/import intersector only
+	// accepts a project 'match' if exactly equal, or a separating slash is
+	// present.
+	"radix path separator post-check": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "foo", "foobar"),
+			),
+			dsp(mkDepspec("foo 1.0.0"),
+				pkg("foo"),
+			),
+			dsp(mkDepspec("foobar 1.0.0"),
+				pkg("foobar"),
+			),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"foobar 1.0.0",
+		),
+	},
+	// Well-formed failure when there's a dependency on a pkg that doesn't exist
+	"fail when imports nonexistent package": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "a 1.0.0"),
+				pkg("root", "a/foo"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+		},
+		fail: &noVersionError{
+			pn: mkPI("a"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &checkeeHasProblemPackagesFailure{
+						goal: mkAtom("a 1.0.0"),
+						failpkg: map[string]errDeppers{
+							"a/foo": errDeppers{
+								err: nil, // nil indicates package is missing
+								deppers: []atom{
+									mkAtom("root"),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+	// Transitive deps from one project (a) get incrementally included as other
+	// deps incorporate its various packages, and fail with proper error when we
+	// discover one incrementally that isn't present
+	"fail multi-stage missing pkg": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "d"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b"),
+				pkg("a/second", "c"),
+			),
+			dsp(mkDepspec("b 2.0.0"),
+				pkg("b"),
+			),
+			dsp(mkDepspec("c 1.2.0"),
+				pkg("c"),
+			),
+			dsp(mkDepspec("d 1.0.0"),
+				pkg("d", "a/second"),
+				pkg("d", "a/nonexistent"),
+			),
+		},
+		fail: &noVersionError{
+			pn: mkPI("d"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &depHasProblemPackagesFailure{
+						goal: mkADep("d 1.0.0", "a", Any(), "a/nonexistent"),
+						v:    NewVersion("1.0.0"),
+						prob: map[string]error{
+							"a/nonexistent": nil,
+						},
+					},
+				},
+			},
+		},
+	},
+	// Check ignores on the root project
+	"ignore in double-subpkg": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "root/bar", "b"),
+				pkg("root/bar", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		ignore: []string{"root/bar"},
+		r: mksolution(
+			"b 1.0.0",
+		),
+	},
+	// Ignores on a dep pkg
+	"ignore through dep pkg": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "root/foo"),
+				pkg("root/foo", "a"),
+			),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "a/bar"),
+				pkg("a/bar", "b"),
+			),
+			dsp(mkDepspec("b 1.0.0"),
+				pkg("b"),
+			),
+		},
+		ignore: []string{"a/bar"},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	// Preferred version, as derived from a dep's lock, is attempted first
+	"respect prefv, simple case": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b")),
+			dsp(mkDepspec("b 1.0.0 foorev"),
+				pkg("b")),
+			dsp(mkDepspec("b 2.0.0 barrev"),
+				pkg("b")),
+		},
+		lm: map[string]fixLock{
+			"a 1.0.0": mklock(
+				"b 1.0.0 foorev",
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0 foorev",
+		),
+	},
+	// Preferred version, as derived from a dep's lock, is attempted first, even
+	// if the root also has a direct dep on it (root doesn't need to use
+	// preferreds, because it has direct control AND because the root lock
+	// already supersedes dep lock "preferences")
+	"respect dep prefv with root import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "b")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b")),
+			//dsp(newDepspec("a 1.0.1"),
+			//pkg("a", "b")),
+			//dsp(newDepspec("a 1.1.0"),
+			//pkg("a", "b")),
+			dsp(mkDepspec("b 1.0.0 foorev"),
+				pkg("b")),
+			dsp(mkDepspec("b 2.0.0 barrev"),
+				pkg("b")),
+		},
+		lm: map[string]fixLock{
+			"a 1.0.0": mklock(
+				"b 1.0.0 foorev",
+			),
+		},
+		r: mksolution(
+			"a 1.0.0",
+			"b 1.0.0 foorev",
+		),
+	},
+	// Preferred versions can only work if the thing offering it has been
+	// selected, or at least marked in the unselected queue
+	"prefv only works if depper is selected": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a", "b")),
+			// Three atoms for a, which will mean it gets visited after b
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a", "b")),
+			dsp(mkDepspec("a 1.0.1"),
+				pkg("a", "b")),
+			dsp(mkDepspec("a 1.1.0"),
+				pkg("a", "b")),
+			dsp(mkDepspec("b 1.0.0 foorev"),
+				pkg("b")),
+			dsp(mkDepspec("b 2.0.0 barrev"),
+				pkg("b")),
+		},
+		lm: map[string]fixLock{
+			"a 1.0.0": mklock(
+				"b 1.0.0 foorev",
+			),
+		},
+		r: mksolution(
+			"a 1.1.0",
+			"b 2.0.0 barrev",
+		),
+	},
+	"override unconstrained root import": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "a")),
+			dsp(mkDepspec("a 1.0.0"),
+				pkg("a")),
+			dsp(mkDepspec("a 2.0.0"),
+				pkg("a")),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("a"): ProjectProperties{
+				Constraint: NewVersion("1.0.0"),
+			},
+		},
+		r: mksolution(
+			"a 1.0.0",
+		),
+	},
+	"alternate net address": {
+		ds: []depspec{
+			dsp(mkDepspec("root 1.0.0", "foo from bar 2.0.0"),
+				pkg("root", "foo")),
+			dsp(mkDepspec("foo 1.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("foo 2.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("bar 2.0.0"),
+				pkg("foo")),
+		},
+		r: mksolution(
+			"foo from bar 2.0.0",
+		),
+	},
+	"alternate net address, version only in alt": {
+		ds: []depspec{
+			dsp(mkDepspec("root 1.0.0", "foo from bar 2.0.0"),
+				pkg("root", "foo")),
+			dsp(mkDepspec("foo 1.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("bar 2.0.0"),
+				pkg("foo")),
+		},
+		r: mksolution(
+			"foo from bar 2.0.0",
+		),
+	},
+	"alternate net address in dep": {
+		ds: []depspec{
+			dsp(mkDepspec("root 1.0.0", "foo 1.0.0"),
+				pkg("root", "foo")),
+			dsp(mkDepspec("foo 1.0.0", "bar from baz 2.0.0"),
+				pkg("foo", "bar")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("bar")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("bar")),
+			dsp(mkDepspec("baz 2.0.0"),
+				pkg("bar")),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar from baz 2.0.0",
+		),
+	},
+	// Because NOT specifying an alternate net address for a given import path
+	// is taken as an "eh, whatever", if we see an empty net addr after
+	// something else has already set an alternate one, then the second should
+	// just "go along" with whatever's already been specified.
+	"alternate net address with second depper": {
+		ds: []depspec{
+			dsp(mkDepspec("root 1.0.0", "foo from bar 2.0.0"),
+				pkg("root", "foo", "baz")),
+			dsp(mkDepspec("foo 1.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("foo 2.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("bar 2.0.0"),
+				pkg("foo")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("baz", "foo")),
+		},
+		r: mksolution(
+			"foo from bar 2.0.0",
+			"baz 1.0.0",
+		),
+	},
+	// Same as the previous, except the alternate declaration originates in a
+	// dep, not the root.
+	"alternate net addr from dep, with second default depper": {
+		ds: []depspec{
+			dsp(mkDepspec("root 1.0.0", "foo 1.0.0"),
+				pkg("root", "foo", "bar")),
+			dsp(mkDepspec("foo 1.0.0", "bar 2.0.0"),
+				pkg("foo", "baz")),
+			dsp(mkDepspec("foo 2.0.0", "bar 2.0.0"),
+				pkg("foo", "baz")),
+			dsp(mkDepspec("bar 2.0.0", "baz from quux 1.0.0"),
+				pkg("bar", "baz")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("baz")),
+			dsp(mkDepspec("baz 2.0.0"),
+				pkg("baz")),
+			dsp(mkDepspec("quux 1.0.0"),
+				pkg("baz")),
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar 2.0.0",
+			"baz from quux 1.0.0",
+		),
+	},
+	// When a given project is initially brought in using the default (i.e.,
+	// empty) ProjectIdentifier.NetworkName, and a later, presumably
+	// as-yet-undiscovered dependency specifies an alternate net addr for it, we
+	// have to fail - even though, if the deps were visited in the opposite
+	// order (deeper dep w/the alternate location first, default location
+	// second), it would be fine.
+	//
+	// TODO A better solution here would involve restarting the solver w/a
+	// marker to use that alternate, or (ugh) introducing a new failure
+	// path/marker type that changes how backtracking works. (In fact, these
+	// approaches are probably demonstrably equivalent.)
+	"fails with net mismatch when deeper dep specs it": {
+		ds: []depspec{
+			dsp(mkDepspec("root 1.0.0", "foo 1.0.0"),
+				pkg("root", "foo", "baz")),
+			dsp(mkDepspec("foo 1.0.0", "bar 2.0.0"),
+				pkg("foo", "bar")),
+			dsp(mkDepspec("bar 2.0.0", "baz from quux 1.0.0"),
+				pkg("bar", "baz")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("baz")),
+			dsp(mkDepspec("quux 1.0.0"),
+				pkg("baz")),
+		},
+		fail: &noVersionError{
+			pn: mkPI("bar"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("2.0.0"),
+					f: &sourceMismatchFailure{
+						shared:   ProjectRoot("baz"),
+						current:  "baz",
+						mismatch: "quux",
+						prob:     mkAtom("bar 2.0.0"),
+						sel:      []dependency{mkDep("foo 1.0.0", "bar 2.0.0", "bar")},
+					},
+				},
+			},
+		},
+	},
+	"with mismatched net addrs": {
+		ds: []depspec{
+			dsp(mkDepspec("root 1.0.0", "foo 1.0.0", "bar 1.0.0"),
+				pkg("root", "foo", "bar")),
+			dsp(mkDepspec("foo 1.0.0", "bar from baz 1.0.0"),
+				pkg("foo", "bar")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("bar")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("bar")),
+		},
+		fail: &noVersionError{
+			pn: mkPI("foo"),
+			fails: []failedVersion{
+				{
+					v: NewVersion("1.0.0"),
+					f: &sourceMismatchFailure{
+						shared:   ProjectRoot("bar"),
+						current:  "bar",
+						mismatch: "baz",
+						prob:     mkAtom("foo 1.0.0"),
+						sel:      []dependency{mkDep("root", "foo 1.0.0", "foo")},
+					},
+				},
+			},
+		},
+	},
+	"overridden mismatched net addrs, alt in dep": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0"),
+				pkg("root", "foo")),
+			dsp(mkDepspec("foo 1.0.0", "bar from baz 1.0.0"),
+				pkg("foo", "bar")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("bar")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("bar")),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("bar"): ProjectProperties{
+				NetworkName: "baz",
+			},
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar from baz 1.0.0",
+		),
+	},
+	"overridden mismatched net addrs, alt in root": {
+		ds: []depspec{
+			dsp(mkDepspec("root 0.0.0", "bar from baz 1.0.0"),
+				pkg("root", "foo")),
+			dsp(mkDepspec("foo 1.0.0"),
+				pkg("foo", "bar")),
+			dsp(mkDepspec("bar 1.0.0"),
+				pkg("bar")),
+			dsp(mkDepspec("baz 1.0.0"),
+				pkg("bar")),
+		},
+		ovr: ProjectConstraints{
+			ProjectRoot("bar"): ProjectProperties{
+				NetworkName: "baz",
+			},
+		},
+		r: mksolution(
+			"foo 1.0.0",
+			"bar from baz 1.0.0",
+		),
+	},
+}
+
+// tpkg is a representation of a single package. It has its own import path, as
+// well as a list of paths it itself "imports".
+type tpkg struct {
+	// Full import path of this package
+	path string
+	// Slice of full paths to its virtual imports
+	imports []string
+}
+
+// bimodalFixture is a solver test fixture in which each project carries its
+// own list of packages (the "bimodal" project/package model), as opposed to
+// the basic fixtures where project and package are one and the same.
+type bimodalFixture struct {
+	// name of this fixture datum
+	n string
+	// bimodal project. first is always treated as root project
+	ds []depspec
+	// results; map of name/version pairs
+	r map[ProjectIdentifier]Version
+	// max attempts the solver should need to find solution. 0 means no limit
+	maxAttempts int
+	// Use downgrade instead of default upgrade sorter
+	downgrade bool
+	// lock file simulator, if one's to be used at all
+	l fixLock
+	// map of locks for deps, if any. keys should be of the form:
+	// "<project> <version>"
+	lm map[string]fixLock
+	// solve failure expected, if any
+	fail error
+	// overrides, if any
+	ovr ProjectConstraints
+	// request up/downgrade to all projects
+	changeall bool
+	// pkgs to ignore
+	ignore []string
+}
+
+// name returns the human-readable name of this fixture.
+func (f bimodalFixture) name() string {
+	return f.n
+}
+
+// specs returns the fixture's depspec set; the first entry is the root.
+func (f bimodalFixture) specs() []depspec {
+	return f.ds
+}
+
+// maxTries returns the maximum number of solve attempts allowed for this
+// fixture; 0 means no limit.
+func (f bimodalFixture) maxTries() int {
+	return f.maxAttempts
+}
+
+// solution returns the expected solver result as a project -> version map.
+func (f bimodalFixture) solution() map[ProjectIdentifier]Version {
+	return f.r
+}
+
+// rootmanifest assembles a RootManifest from the root depspec's constraints,
+// plus the fixture's overrides and ignore list.
+func (f bimodalFixture) rootmanifest() RootManifest {
+	m := simpleRootManifest{
+		c:   f.ds[0].deps,
+		tc:  f.ds[0].devdeps,
+		ovr: f.ovr,
+		ig:  make(map[string]bool),
+	}
+	for _, ig := range f.ignore {
+		m.ig[ig] = true
+	}
+
+	return m
+}
+
+// rootTree builds the root project's PackageTree from the root depspec's
+// package list.
+func (f bimodalFixture) rootTree() PackageTree {
+	pt := PackageTree{
+		ImportRoot: string(f.ds[0].n),
+		Packages:   map[string]PackageOrErr{},
+	}
+
+	for _, pkg := range f.ds[0].pkgs {
+		// Package name is the final element of its import path.
+		elems := strings.Split(pkg.path, "/")
+		pt.Packages[pkg.path] = PackageOrErr{
+			P: Package{
+				ImportPath: pkg.path,
+				Name:       elems[len(elems)-1],
+				// TODO(sdboyer) ugh, tpkg type has no space for supporting test
+				// imports...
+				Imports: pkg.imports,
+			},
+		}
+	}
+
+	return pt
+}
+
+// failure returns the error the solver is expected to produce, if any.
+func (f bimodalFixture) failure() error {
+	return f.fail
+}
+
+// bmSourceManager is an SM specifically for the bimodal fixtures. It composes
+// the general depspec SM, and differs from it in how it answers static analysis
+// calls, and its support for package ignores and dep lock data.
+type bmSourceManager struct {
+	depspecSourceManager
+	// lm maps "<project> <version>" keys to the fixLock reported for that
+	// dep, mirroring bimodalFixture.lm.
+	lm map[string]fixLock
+}
+
+// Compile-time check that bmSourceManager satisfies SourceManager.
+var _ SourceManager = &bmSourceManager{}
+
+// newbmSM creates a bmSourceManager primed with a bimodal fixture's depspecs,
+// its computed external reach map, and its per-dep lock data.
+func newbmSM(bmf bimodalFixture) *bmSourceManager {
+	sm := &bmSourceManager{
+		depspecSourceManager: *newdepspecSM(bmf.ds, bmf.ignore),
+	}
+	sm.rm = computeBimodalExternalMap(bmf.ds)
+	sm.lm = bmf.lm
+
+	return sm
+}
+
+// ListPackages synthesizes a PackageTree for the identified project at the
+// given version from the fixture depspecs, rather than from real analysis.
+func (sm *bmSourceManager) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	for k, ds := range sm.specs {
+		// Cheat for root, otherwise we blow up b/c version is empty
+		if id.netName() == string(ds.n) && (k == 0 || ds.v.Matches(v)) {
+			ptree := PackageTree{
+				ImportRoot: id.netName(),
+				Packages:   make(map[string]PackageOrErr),
+			}
+			for _, pkg := range ds.pkgs {
+				ptree.Packages[pkg.path] = PackageOrErr{
+					P: Package{
+						ImportPath: pkg.path,
+						Name:       filepath.Base(pkg.path),
+						Imports:    pkg.imports,
+					},
+				}
+			}
+
+			return ptree, nil
+		}
+	}
+
+	return PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v)
+}
+
+// GetManifestAndLock returns the matching depspec as the manifest, paired
+// with the fixture-specified lock for that project/version if one exists,
+// or an empty dummyLock otherwise.
+func (sm *bmSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
+	for _, ds := range sm.specs {
+		if id.netName() == string(ds.n) && v.Matches(ds.v) {
+			if l, exists := sm.lm[id.netName()+" "+v.String()]; exists {
+				return ds, l, nil
+			}
+			return ds, dummyLock{}, nil
+		}
+	}
+
+	// TODO(sdboyer) proper solver-type errors
+	return nil, nil, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v)
+}
+
+// computeBimodalExternalMap takes a set of depspecs and computes an
+// internally-versioned external reach map that is useful for quickly answering
+// ListExternal()-type calls.
+//
+// Note that it does not do things like stripping out stdlib packages - these
+// maps are intended for use in SM fixtures, and that's a higher-level
+// responsibility within the system.
+func computeBimodalExternalMap(ds []depspec) map[pident]map[string][]string {
+	// map of project name+version -> map of subpkg name -> external pkg list
+	rm := make(map[pident]map[string][]string)
+
+	// algorithm adapted from externalReach()
+	for _, d := range ds {
+		// Keeps a list of all internal and external reaches for packages within
+		// a given root. We create one on each pass through, rather than doing
+		// them all at once, because the depspec set may (read: is expected to)
+		// have multiple versions of the same base project, and each of those
+		// must be calculated independently.
+		workmap := make(map[string]wm)
+
+		for _, pkg := range d.pkgs {
+			w := wm{
+				ex: make(map[string]bool),
+				in: make(map[string]bool),
+			}
+
+			for _, imp := range pkg.imports {
+				if !checkPrefixSlash(filepath.Clean(imp), string(d.n)) {
+					// Easy case - if the import is not a child of the base
+					// project path, put it in the external map
+					w.ex[imp] = true
+				} else {
+					if w2, seen := workmap[imp]; seen {
+						// If it is, and we've seen that path, dereference it
+						// immediately
+						for i := range w2.ex {
+							w.ex[i] = true
+						}
+						for i := range w2.in {
+							w.in[i] = true
+						}
+					} else {
+						// Otherwise, put it in the 'in' map for later
+						// reprocessing
+						w.in[imp] = true
+					}
+				}
+			}
+			workmap[pkg.path] = w
+		}
+
+		// Flatten the per-package work maps into external-only reach lists,
+		// filed under this project's name+version pair.
+		drm := wmToReach(workmap, "")
+		rm[pident{n: d.n, v: d.v}] = drm
+	}
+
+	return rm
+}
diff --git a/vendor/github.com/sdboyer/gps/solve_failures.go b/vendor/github.com/sdboyer/gps/solve_failures.go
new file mode 100644
index 0000000..9c144e8
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_failures.go
@@ -0,0 +1,492 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// errorLevel classifies solve failures by severity.
+type errorLevel uint8
+
+// TODO(sdboyer) consistent, sensible way of handling 'type' and 'severity' - or figure
+// out that they're not orthogonal and collapse into just 'type'
+
+const (
+	warning errorLevel = 1 << iota
+	mustResolve
+	cannotResolve
+)
+
+// a2vs formats an atom as "<ident>@<version>" for failure messages. The root
+// atom is special-cased as "(root)", since it has no meaningful version.
+func a2vs(a atom) string {
+	if a.v == rootRev || a.v == nil {
+		return "(root)"
+	}
+
+	return fmt.Sprintf("%s@%s", a.id.errString(), a.v)
+}
+
+// traceError is implemented by failures that can render a compact string for
+// the solver's trace log, in addition to the standard Error() form.
+type traceError interface {
+	traceString() string
+}
+
+// noVersionError records that no version of a project satisfied the active
+// constraints, along with the specific failure hit at each attempted version.
+type noVersionError struct {
+	pn    ProjectIdentifier
+	fails []failedVersion
+}
+
+// Error lists each attempted version and the reason it was rejected.
+func (e *noVersionError) Error() string {
+	if len(e.fails) == 0 {
+		return fmt.Sprintf("No versions found for project %q.", e.pn.ProjectRoot)
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
+	for _, f := range e.fails {
+		fmt.Fprintf(&buf, "\n\t%s: %s", f.v, f.f.Error())
+	}
+
+	return buf.String()
+}
+
+// traceString is like Error, but prefers each sub-failure's compact trace
+// form when it is available.
+func (e *noVersionError) traceString() string {
+	if len(e.fails) == 0 {
+		return fmt.Sprintf("No versions found")
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "No versions of %s met constraints:", e.pn.ProjectRoot)
+	for _, f := range e.fails {
+		if te, ok := f.f.(traceError); ok {
+			fmt.Fprintf(&buf, "\n  %s: %s", f.v, te.traceString())
+		} else {
+			fmt.Fprintf(&buf, "\n  %s: %s", f.v, f.f.Error())
+		}
+	}
+
+	return buf.String()
+}
+
+// disjointConstraintFailure occurs when attempting to introduce an atom that
+// itself has an acceptable version, but one of its dependency constraints is
+// disjoint with one or more dependency constraints already active for that
+// identifier.
+type disjointConstraintFailure struct {
+	// goal is the dependency with the problematic constraint, forcing us to
+	// reject the atom that introduces it.
+	goal dependency
+	// failsib is the list of active dependencies that are disjoint with the
+	// goal dependency. This will be at least one, but may not be all of the
+	// active dependencies.
+	failsib []dependency
+	// nofailsib is the list of active dependencies that are NOT disjoint with
+	// the goal dependency. The total of nofailsib and failsib will always be
+	// the total number of active dependencies on target identifier.
+	nofailsib []dependency
+	// c is the current constraint on the target identifier. It is intersection
+	// of all the active dependencies' constraints.
+	c Constraint
+}
+
+// Error reports which existing constraint(s) have no overlap with the goal
+// dependency's constraint, using a compact single-sibling form when possible.
+func (e *disjointConstraintFailure) Error() string {
+	if len(e.failsib) == 1 {
+		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which has no overlap with existing constraint %s from %s"
+		return fmt.Sprintf(str, a2vs(e.goal.depender), e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String(), e.failsib[0].dep.Constraint.String(), a2vs(e.failsib[0].depender))
+	}
+
+	var buf bytes.Buffer
+
+	// With multiple disjoint siblings, list them; otherwise the conflict is
+	// only with the intersection of the (individually overlapping) siblings.
+	var sibs []dependency
+	if len(e.failsib) > 1 {
+		sibs = e.failsib
+
+		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which has no overlap with the following existing constraints:\n"
+		fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
+	} else {
+		sibs = e.nofailsib
+
+		str := "Could not introduce %s, as it has a dependency on %s with constraint %s, which does not overlap with the intersection of existing constraints from other currently selected packages:\n"
+		fmt.Fprintf(&buf, str, a2vs(e.goal.depender), e.goal.dep.Ident.errString(), e.goal.dep.Constraint.String())
+	}
+
+	for _, c := range sibs {
+		fmt.Fprintf(&buf, "\t%s from %s\n", c.dep.Constraint.String(), a2vs(c.depender))
+	}
+
+	return buf.String()
+}
+
+// traceString renders the failure compactly, listing both the disjoint and
+// the overlapping sibling constraints.
+func (e *disjointConstraintFailure) traceString() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "constraint %s on %s disjoint with other dependers:\n", e.goal.dep.Constraint.String(), e.goal.dep.Ident.errString())
+	for _, f := range e.failsib {
+		fmt.Fprintf(
+			&buf,
+			"%s from %s (no overlap)\n",
+			f.dep.Constraint.String(),
+			a2vs(f.depender),
+		)
+	}
+	for _, f := range e.nofailsib {
+		fmt.Fprintf(
+			&buf,
+			"%s from %s (some overlap)\n",
+			f.dep.Constraint.String(),
+			a2vs(f.depender),
+		)
+	}
+
+	return buf.String()
+}
+
+// Indicates that an atom could not be introduced because one of its dep
+// constraints does not admit the currently-selected version of the target
+// project.
+type constraintNotAllowedFailure struct {
+	// The dependency with the problematic constraint that could not be
+	// introduced.
+	goal dependency
+	// The (currently selected) version of the target project that was not
+	// admissible by the goal dependency.
+	v Version
+}
+
+// Error describes the conflict between the goal dependency's constraint and
+// the version of the target that is already selected.
+func (e *constraintNotAllowedFailure) Error() string {
+	return fmt.Sprintf(
+		"Could not introduce %s, as it has a dependency on %s with constraint %s, which does not allow the currently selected version of %s",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.goal.dep.Constraint,
+		e.v,
+	)
+}
+
+// traceString renders the same conflict in compact form for trace output.
+func (e *constraintNotAllowedFailure) traceString() string {
+	return fmt.Sprintf(
+		"%s depends on %s with %s, but that's already selected at %s",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.ProjectRoot,
+		e.goal.dep.Constraint,
+		e.v,
+	)
+}
+
+// versionNotAllowedFailure describes a failure where an atom is rejected
+// because its version is not allowed by current constraints.
+//
+// (This is one of the more straightforward types of failures)
+type versionNotAllowedFailure struct {
+	// goal is the atom that was rejected by current constraints.
+	goal atom
+	// failparent is the list of active dependencies that caused the atom to be
+	// rejected. Note that this only includes dependencies that actually
+	// rejected the atom, which will be at least one, but may not be all the
+	// active dependencies on the atom's identifier.
+	failparent []dependency
+	// c is the current constraint on the atom's identifier. This is the intersection
+	// of all active dependencies' constraints.
+	c Constraint
+}
+
+// Error names the constraint(s), and the projects imposing them, that
+// rejected the goal atom's version.
+func (e *versionNotAllowedFailure) Error() string {
+	if len(e.failparent) == 1 {
+		return fmt.Sprintf(
+			"Could not introduce %s, as it is not allowed by constraint %s from project %s.",
+			a2vs(e.goal),
+			e.failparent[0].dep.Constraint.String(),
+			e.failparent[0].depender.id.errString(),
+		)
+	}
+
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "Could not introduce %s, as it is not allowed by constraints from the following projects:\n", a2vs(e.goal))
+
+	for _, f := range e.failparent {
+		fmt.Fprintf(&buf, "\t%s from %s\n", f.dep.Constraint.String(), a2vs(f.depender))
+	}
+
+	return buf.String()
+}
+
+// traceString renders the rejection compactly, including the intersected
+// constraint and each rejecting parent.
+func (e *versionNotAllowedFailure) traceString() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%s not allowed by constraint %s:\n", a2vs(e.goal), e.c.String())
+	for _, f := range e.failparent {
+		fmt.Fprintf(&buf, "  %s from %s\n", f.dep.Constraint.String(), a2vs(f.depender))
+	}
+
+	return buf.String()
+}
+
+// missingSourceFailure indicates that no source could be located for an
+// identifier. prob is a printf-style format string into which goal is
+// interpolated when rendering the error.
+type missingSourceFailure struct {
+	goal ProjectIdentifier
+	prob string
+}
+
+func (e *missingSourceFailure) Error() string {
+	return fmt.Sprintf(e.prob, e.goal)
+}
+
+// badOptsFailure is a simple string error describing invalid solver options.
+type badOptsFailure string
+
+func (e badOptsFailure) Error() string {
+	return string(e)
+}
+
+// sourceMismatchFailure occurs when projects disagree on the network source
+// from which a shared dependency should be retrieved.
+type sourceMismatchFailure struct {
+	// The ProjectRoot over which there is disagreement about where it should be
+	// sourced from
+	shared ProjectRoot
+	// The current value for the network source
+	current string
+	// The mismatched value for the network source
+	mismatch string
+	// The currently selected dependencies which have agreed upon/established
+	// the given network source
+	sel []dependency
+	// The atom with the constraint that has the new, incompatible network source
+	prob atom
+}
+
+// Error names the conflicting sources and the projects backing the
+// currently-established one.
+func (e *sourceMismatchFailure) Error() string {
+	var cur []string
+	for _, c := range e.sel {
+		cur = append(cur, string(c.depender.id.ProjectRoot))
+	}
+
+	str := "Could not introduce %s, as it depends on %s from %s, but %s is already marked as coming from %s by %s"
+	return fmt.Sprintf(str, a2vs(e.prob), e.shared, e.mismatch, e.shared, e.current, strings.Join(cur, ", "))
+}
+
+// traceString lists each side's network address along with its proponents.
+func (e *sourceMismatchFailure) traceString() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "disagreement on network addr for %s:\n", e.shared)
+
+	fmt.Fprintf(&buf, "  %s from %s\n", e.mismatch, e.prob.id.errString())
+	for _, dep := range e.sel {
+		fmt.Fprintf(&buf, "  %s from %s\n", e.current, dep.depender.id.errString())
+	}
+
+	return buf.String()
+}
+
+// errDeppers pairs a package-level error with the list of selected atoms
+// that require the problematic package.
+type errDeppers struct {
+	err     error
+	deppers []atom
+}
+
+// checkeeHasProblemPackagesFailure indicates that the goal atom was rejected
+// because one or more of the packages required by its deppers had errors.
+//
+// "errors" includes package nonexistence, which is indicated by a nil err in
+// the corresponding errDeppers failpkg map value.
+//
+// checkeeHasProblemPackagesFailure complements depHasProblemPackagesFailure;
+// one or the other could appear to describe the same fundamental issue,
+// depending on the order in which dependencies were visited.
+type checkeeHasProblemPackagesFailure struct {
+	// goal is the atom that was rejected due to problematic packages.
+	goal atom
+	// failpkg is a map of package names to the error describing the problem
+	// with them, plus a list of the selected atoms that require that package.
+	failpkg map[string]errDeppers
+}
+
+// Error reports each problem package (missing or unparseable), and the
+// selected atoms that require it.
+func (e *checkeeHasProblemPackagesFailure) Error() string {
+	var buf bytes.Buffer
+	indent := ""
+
+	// Only indent the per-package lines when more than one problem package
+	// has to be reported.
+	if len(e.failpkg) > 1 {
+		indent = "\t"
+		fmt.Fprintf(
+			&buf, "Could not introduce %s due to multiple problematic subpackages:\n",
+			a2vs(e.goal),
+		)
+	}
+
+	for pkg, errdep := range e.failpkg {
+		var cause string
+		// A nil err means the package simply does not exist.
+		if errdep.err == nil {
+			cause = "is missing"
+		} else {
+			cause = fmt.Sprintf("does not contain usable Go code (%T).", errdep.err)
+		}
+
+		if len(e.failpkg) == 1 {
+			fmt.Fprintf(
+				&buf, "Could not introduce %s, as its subpackage %s %s.",
+				a2vs(e.goal),
+				pkg,
+				cause,
+			)
+		} else {
+			fmt.Fprintf(&buf, "\tSubpackage %s %s.", pkg, cause)
+		}
+
+		if len(errdep.deppers) == 1 {
+			fmt.Fprintf(
+				&buf, " (Package is required by %s.)",
+				a2vs(errdep.deppers[0]),
+			)
+		} else {
+			fmt.Fprintf(&buf, " Package is required by:")
+			for _, pa := range errdep.deppers {
+				fmt.Fprintf(&buf, "\n%s\t%s", indent, a2vs(pa))
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+// traceString renders the same per-package report in compact trace form.
+func (e *checkeeHasProblemPackagesFailure) traceString() string {
+	var buf bytes.Buffer
+
+	fmt.Fprintf(&buf, "%s at %s has problem subpkg(s):\n", e.goal.id.ProjectRoot, e.goal.v)
+	for pkg, errdep := range e.failpkg {
+		if errdep.err == nil {
+			fmt.Fprintf(&buf, "\t%s is missing; ", pkg)
+		} else {
+			fmt.Fprintf(&buf, "\t%s has err (%T); ", pkg, errdep.err)
+		}
+
+		if len(errdep.deppers) == 1 {
+			fmt.Fprintf(&buf, "required by %s.", a2vs(errdep.deppers[0]))
+		} else {
+			fmt.Fprintf(&buf, " required by:")
+			for _, pa := range errdep.deppers {
+				fmt.Fprintf(&buf, "\n\t\t%s at %s", pa.id.errString(), pa.v)
+			}
+		}
+	}
+
+	return buf.String()
+}
+
+// depHasProblemPackagesFailure indicates that the goal dependency was rejected
+// because there were problems with one or more of the packages the dependency
+// requires in the atom currently selected for that dependency. (This failure
+// can only occur if the target dependency is already selected.)
+//
+// "errors" includes package nonexistence, which is indicated by a nil err as
+// the corresponding prob map value.
+//
+// depHasProblemPackagesFailure complements checkeeHasProblemPackagesFailure;
+// one or the other could appear to describe the same fundamental issue,
+// depending on the order in which dependencies were visited.
+type depHasProblemPackagesFailure struct {
+	// goal is the dependency that was rejected due to the atom currently
+	// selected for the dependency's target id having errors (including, and
+	// probably most commonly,
+	// nonexistence) in one or more packages named by the dependency.
+	goal dependency
+	// v is the version of the currently selected atom targeted by the goal
+	// dependency.
+	v Version
+	// prob is a map of problem packages to their specific error. It does not
+	// include missing packages.
+	prob map[string]error
+}
+
+// Error reports the problem packages, using a single-sentence form when only
+// one package is at fault and a list otherwise.
+func (e *depHasProblemPackagesFailure) Error() string {
+	// fcause renders the reason a given package is problematic; a nil map
+	// entry indicates the package is missing entirely.
+	fcause := func(pkg string) string {
+		if err := e.prob[pkg]; err != nil {
+			return fmt.Sprintf("does not contain usable Go code (%T).", err)
+		}
+		return "is missing."
+	}
+
+	if len(e.prob) == 1 {
+		// Pull out the sole key from the map.
+		var pkg string
+		for pkg = range e.prob {
+		}
+
+		return fmt.Sprintf(
+			"Could not introduce %s, as it requires package %s from %s, but in version %s that package %s",
+			a2vs(e.goal.depender),
+			pkg,
+			e.goal.dep.Ident.errString(),
+			e.v,
+			fcause(pkg),
+		)
+	}
+
+	var buf bytes.Buffer
+	fmt.Fprintf(
+		&buf, "Could not introduce %s, as it requires problematic packages from %s (current version %s):",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.v,
+	)
+
+	// Sort package names for deterministic output.
+	pkgs := make([]string, len(e.prob))
+	k := 0
+	for pkg := range e.prob {
+		pkgs[k] = pkg
+		k++
+	}
+	sort.Strings(pkgs)
+	for _, pkg := range pkgs {
+		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
+	}
+
+	return buf.String()
+}
+
+// traceString renders the same problem-package report in compact trace form.
+func (e *depHasProblemPackagesFailure) traceString() string {
+	var buf bytes.Buffer
+	fcause := func(pkg string) string {
+		if err := e.prob[pkg]; err != nil {
+			return fmt.Sprintf("has parsing err (%T).", err)
+		}
+		return "is missing"
+	}
+
+	fmt.Fprintf(
+		&buf, "%s depping on %s at %s has problem subpkg(s):",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.v,
+	)
+
+	// Sort package names for deterministic output.
+	pkgs := make([]string, len(e.prob))
+	k := 0
+	for pkg := range e.prob {
+		pkgs[k] = pkg
+		k++
+	}
+	sort.Strings(pkgs)
+	for _, pkg := range pkgs {
+		fmt.Fprintf(&buf, "\t%s %s", pkg, fcause(pkg))
+	}
+
+	return buf.String()
+}
+
+// nonexistentRevisionFailure indicates that a revision constraint was specified
+// for a given project, but that that revision does not exist in the source
+// repository.
+type nonexistentRevisionFailure struct {
+	goal dependency
+	r    Revision
+}
+
+// Error reports the depender, the target project, and the missing revision.
+func (e *nonexistentRevisionFailure) Error() string {
+	return fmt.Sprintf(
+		"Could not introduce %s, as it requires %s at revision %s, but that revision does not exist",
+		a2vs(e.goal.depender),
+		e.goal.dep.Ident.errString(),
+		e.r,
+	)
+}
+
+// traceString renders the missing-revision failure in compact trace form.
+func (e *nonexistentRevisionFailure) traceString() string {
+	return fmt.Sprintf(
+		"%s wants missing rev %s of %s",
+		a2vs(e.goal.depender),
+		e.r,
+		e.goal.dep.Ident.errString(),
+	)
+}
diff --git a/vendor/github.com/sdboyer/gps/solve_test.go b/vendor/github.com/sdboyer/gps/solve_test.go
new file mode 100644
index 0000000..425dd50
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solve_test.go
@@ -0,0 +1,389 @@
+package gps
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"math/rand"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"testing"
+)
+
+// fixtorun optionally names a single fixture to run, set via the gps.fix flag.
+var fixtorun string
+
+// TODO(sdboyer) regression test ensuring that locks with only revs for projects don't cause errors
+func init() {
+	flag.StringVar(&fixtorun, "gps.fix", "", "A single fixture to run in TestBasicSolves or TestBimodalSolves")
+	overrideMkBridge()
+}
+
+// sets the mkBridge global func to one that allows virtualized RootDirs
+func overrideMkBridge() {
+	// For all tests, override the base bridge with the depspecBridge that skips
+	// verifyRootDir calls
+	mkBridge = func(s *solver, sm SourceManager) sourceBridge {
+		return &depspecBridge{
+			&bridge{
+				sm:     sm,
+				s:      s,
+				vlists: make(map[ProjectIdentifier][]Version),
+			},
+		}
+	}
+}
+
+// stderrlog writes solver trace output to stderr with no log prefix.
+var stderrlog = log.New(os.Stderr, "", 0)
+
+// fixSolve prepares and runs the solver for a fixture's parameters, enabling
+// trace output when the tests are run in verbose mode.
+func fixSolve(params SolveParameters, sm SourceManager) (Solution, error) {
+	if testing.Verbose() {
+		params.Trace = true
+		params.TraceLogger = stderrlog
+	}
+
+	s, err := Prepare(params, sm)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.Solve()
+}
+
+// Test all the basic table fixtures.
+//
+// Or, just the one named in the fix arg.
+func TestBasicSolves(t *testing.T) {
+	if fixtorun != "" {
+		if fix, exists := basicFixtures[fixtorun]; exists {
+			solveBasicsAndCheck(fix, t)
+		}
+	} else {
+		// sort them by their keys so we get stable output
+		var names []string
+		for n := range basicFixtures {
+			names = append(names, n)
+		}
+
+		sort.Strings(names)
+		for _, n := range names {
+			solveBasicsAndCheck(basicFixtures[n], t)
+			if testing.Verbose() {
+				// insert a line break between tests
+				stderrlog.Println("")
+			}
+		}
+	}
+}
+
+// solveBasicsAndCheck runs the solver against a single basic fixture and
+// verifies the result (or expected failure) via fixtureSolveSimpleChecks.
+func solveBasicsAndCheck(fix basicFixture, t *testing.T) (res Solution, err error) {
+	if testing.Verbose() {
+		stderrlog.Printf("[[fixture %q]]", fix.n)
+	}
+	sm := newdepspecSM(fix.ds, nil)
+
+	params := SolveParameters{
+		RootDir:         string(fix.ds[0].n),
+		RootPackageTree: fix.rootTree(),
+		Manifest:        fix.rootmanifest(),
+		Lock:            dummyLock{},
+		Downgrade:       fix.downgrade,
+		ChangeAll:       fix.changeall,
+	}
+
+	// A fixture-specified lock replaces the default empty dummy lock.
+	if fix.l != nil {
+		params.Lock = fix.l
+	}
+
+	res, err = fixSolve(params, sm)
+
+	return fixtureSolveSimpleChecks(fix, res, err, t)
+}
+
+// Test all the bimodal table fixtures.
+//
+// Or, just the one named in the fix arg.
+func TestBimodalSolves(t *testing.T) {
+	if fixtorun != "" {
+		if fix, exists := bimodalFixtures[fixtorun]; exists {
+			solveBimodalAndCheck(fix, t)
+		}
+	} else {
+		// sort them by their keys so we get stable output
+		var names []string
+		for n := range bimodalFixtures {
+			names = append(names, n)
+		}
+
+		sort.Strings(names)
+		for _, n := range names {
+			solveBimodalAndCheck(bimodalFixtures[n], t)
+			if testing.Verbose() {
+				// insert a line break between tests
+				stderrlog.Println("")
+			}
+		}
+	}
+}
+
+// solveBimodalAndCheck runs the solver against a single bimodal fixture and
+// verifies the result (or expected failure) via fixtureSolveSimpleChecks.
+func solveBimodalAndCheck(fix bimodalFixture, t *testing.T) (res Solution, err error) {
+	if testing.Verbose() {
+		stderrlog.Printf("[[fixture %q]]", fix.n)
+	}
+	sm := newbmSM(fix)
+
+	params := SolveParameters{
+		RootDir:         string(fix.ds[0].n),
+		RootPackageTree: fix.rootTree(),
+		Manifest:        fix.rootmanifest(),
+		Lock:            dummyLock{},
+		Downgrade:       fix.downgrade,
+		ChangeAll:       fix.changeall,
+	}
+
+	// A fixture-specified lock replaces the default empty dummy lock.
+	if fix.l != nil {
+		params.Lock = fix.l
+	}
+
+	res, err = fixSolve(params, sm)
+
+	return fixtureSolveSimpleChecks(fix, res, err, t)
+}
+
+// fixtureSolveSimpleChecks compares a solver run's outcome (solution or
+// error) against the fixture's expectations, reporting every mismatch via t.
+// It returns the solution and error unchanged so callers can inspect further.
+func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing.T) (Solution, error) {
+	ppi := func(id ProjectIdentifier) string {
+		// need this so we can clearly tell if there's a NetworkName or not
+		if id.NetworkName == "" {
+			return string(id.ProjectRoot)
+		}
+		return fmt.Sprintf("%s (from %s)", id.ProjectRoot, id.NetworkName)
+	}
+
+	// pv renders a version; paired versions are unpaired so the underlying
+	// revision shows alongside the version.
+	pv := func(v Version) string {
+		if pv, ok := v.(PairedVersion); ok {
+			return fmt.Sprintf("%s (%s)", pv.Unpair(), pv.Underlying())
+		}
+		return v.String()
+	}
+
+	fixfail := fix.failure()
+	if err != nil {
+		if fixfail == nil {
+			t.Errorf("(fixture: %q) Solve failed unexpectedly:\n%s", fix.name(), err)
+		} else if !reflect.DeepEqual(fixfail, err) {
+			// TODO(sdboyer) reflect.DeepEqual works for now, but once we start
+			// modeling more complex cases, this should probably become more robust
+			t.Errorf("(fixture: %q) Failure mismatch:\n\t(GOT): %s\n\t(WNT): %s", fix.name(), err, fixfail)
+		}
+	} else if fixfail != nil {
+		var buf bytes.Buffer
+		fmt.Fprintf(&buf, "(fixture: %q) Solver succeeded, but expecting failure:\n%s\nProjects in solution:", fix.name(), fixfail)
+		for _, p := range soln.Projects() {
+			fmt.Fprintf(&buf, "\n\t- %s at %s", ppi(p.Ident()), p.Version())
+		}
+		t.Error(buf.String())
+	} else {
+		r := soln.(solution)
+		if fix.maxTries() > 0 && r.Attempts() > fix.maxTries() {
+			t.Errorf("(fixture: %q) Solver completed in %v attempts, but expected %v or fewer", fix.name(), r.att, fix.maxTries())
+		}
+
+		// Dump result projects into a map for easier interrogation
+		rp := make(map[ProjectIdentifier]Version)
+		for _, p := range r.p {
+			pa := p.toAtom()
+			rp[pa.id] = pa.v
+		}
+
+		fixlen, rlen := len(fix.solution()), len(rp)
+		if fixlen != rlen {
+			// Different length, so they definitely disagree
+			t.Errorf("(fixture: %q) Solver reported %v package results, result expected %v", fix.name(), rlen, fixlen)
+		}
+
+		// Whether or not len is same, still have to verify that results agree
+		// Walk through fixture/expected results first
+		for p, v := range fix.solution() {
+			if av, exists := rp[p]; !exists {
+				t.Errorf("(fixture: %q) Project %q expected but missing from results", fix.name(), ppi(p))
+			} else {
+				// delete result from map so we skip it on the reverse pass
+				delete(rp, p)
+				if v != av {
+					t.Errorf("(fixture: %q) Expected version %q of project %q, but actual version was %q", fix.name(), pv(v), ppi(p), pv(av))
+				}
+			}
+		}
+
+		// Now walk through remaining actual results
+		for p, v := range rp {
+			if fv, exists := fix.solution()[p]; !exists {
+				t.Errorf("(fixture: %q) Unexpected project %q present in results", fix.name(), ppi(p))
+			} else if v != fv {
+				t.Errorf("(fixture: %q) Got version %q of project %q, but expected version was %q", fix.name(), pv(v), ppi(p), pv(fv))
+			}
+		}
+	}
+
+	return soln, err
+}
+
+// This tests that, when a root lock is underspecified (has only a version) we
+// don't allow a match on that version from a rev in the manifest. We may allow
+// this in the future, but disallow it for now because going from an immutable
+// requirement to a mutable lock automagically is a bad direction that could
+// produce weird side effects.
+func TestRootLockNoVersionPairMatching(t *testing.T) {
+	fix := basicFixture{
+		n: "does not match unpaired lock versions with paired real versions",
+		ds: []depspec{
+			mkDepspec("root 0.0.0", "foo *"), // foo's constraint rewritten below to foorev
+			mkDepspec("foo 1.0.0", "bar 1.0.0"),
+			mkDepspec("foo 1.0.1 foorev", "bar 1.0.1"),
+			mkDepspec("foo 1.0.2 foorev", "bar 1.0.2"),
+			mkDepspec("bar 1.0.0"),
+			mkDepspec("bar 1.0.1"),
+			mkDepspec("bar 1.0.2"),
+		},
+		l: mklock(
+			"foo 1.0.1",
+		),
+		r: mksolution(
+			"foo 1.0.2 foorev",
+			"bar 1.0.2",
+		),
+	}
+
+	// Swap root's constraint on foo from "*" to the raw revision, per the
+	// inline comment on the first mkDepspec call above.
+	pd := fix.ds[0].deps[0]
+	pd.Constraint = Revision("foorev")
+	fix.ds[0].deps[0] = pd
+
+	sm := newdepspecSM(fix.ds, nil)
+
+	// Copy the fixture lock and blank out its sole entry's version, leaving
+	// the lock underspecified - the condition under test.
+	// NOTE(review): assumes fixLock entries carry the version in field v;
+	// confirm against the fixLock definition.
+	l2 := make(fixLock, 1)
+	copy(l2, fix.l)
+	l2[0].v = nil
+
+	params := SolveParameters{
+		RootDir:         string(fix.ds[0].n),
+		RootPackageTree: fix.rootTree(),
+		Manifest:        fix.rootmanifest(),
+		Lock:            l2,
+	}
+
+	res, err := fixSolve(params, sm)
+
+	// Expected outcome: the solver must NOT pair the lock's bare version with
+	// foorev; per fix.r it should land on foo 1.0.2 via the rev constraint.
+	fixtureSolveSimpleChecks(fix, res, err, t)
+}
+
+// TestBadSolveOpts walks Prepare() through each of its input-validation
+// failure modes in sequence, fixing one problem at a time, and finally
+// verifies the rootdir checks performed by the real (non-stubbed) bridge.
+func TestBadSolveOpts(t *testing.T) {
+	// Random base-36 project name guarantees no collision with anything real.
+	pn := strconv.FormatInt(rand.Int63(), 36)
+	fix := basicFixtures["no dependencies"]
+	fix.ds[0].n = ProjectRoot(pn)
+
+	sm := newdepspecSM(fix.ds, nil)
+	params := SolveParameters{}
+
+	// Nil SourceManager must be rejected first.
+	_, err := Prepare(params, nil)
+	if err == nil {
+		t.Errorf("Prepare should have errored on nil SourceManager")
+	} else if !strings.Contains(err.Error(), "non-nil SourceManager") {
+		t.Error("Prepare should have given error on nil SourceManager, but gave:", err)
+	}
+
+	// Empty RootDir.
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Prepare should have errored on empty root")
+	} else if !strings.Contains(err.Error(), "non-empty root directory") {
+		t.Error("Prepare should have given error on empty root, but gave:", err)
+	}
+
+	// Empty ImportRoot in the package tree.
+	params.RootDir = pn
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Prepare should have errored on empty name")
+	} else if !strings.Contains(err.Error(), "non-empty import root") {
+		t.Error("Prepare should have given error on empty import root, but gave:", err)
+	}
+
+	// ImportRoot set, but no packages in the tree.
+	// NOTE(review): the two t.Errorf/t.Error messages below duplicate the
+	// previous case's wording ("empty name"/"empty import root") even though
+	// this asserts the "at least one package" check - consider distinct text.
+	params.RootPackageTree = PackageTree{
+		ImportRoot: pn,
+	}
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Prepare should have errored on empty name")
+	} else if !strings.Contains(err.Error(), "at least one package") {
+		t.Error("Prepare should have given error on empty import root, but gave:", err)
+	}
+
+	// Valid tree, but Trace requested without a logger.
+	params.RootPackageTree = PackageTree{
+		ImportRoot: pn,
+		Packages: map[string]PackageOrErr{
+			pn: {
+				P: Package{
+					ImportPath: pn,
+					Name:       pn,
+				},
+			},
+		},
+	}
+	params.Trace = true
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on trace with no logger")
+	} else if !strings.Contains(err.Error(), "no logger provided") {
+		t.Error("Prepare should have given error on missing trace logger, but gave:", err)
+	}
+	params.TraceLogger = log.New(ioutil.Discard, "", 0)
+
+	// An override with zero-value ProjectProperties must be rejected.
+	params.Manifest = simpleRootManifest{
+		ovr: ProjectConstraints{
+			ProjectRoot("foo"): ProjectProperties{},
+		},
+	}
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on override with empty ProjectProperties")
+	} else if !strings.Contains(err.Error(), "foo, but without any non-zero properties") {
+		t.Error("Prepare should have given error override with empty ProjectProperties, but gave:", err)
+	}
+	params.Manifest = nil
+
+	// All validation problems resolved; Prepare should now succeed.
+	_, err = Prepare(params, sm)
+	if err != nil {
+		t.Error("Basic conditions satisfied, prepare should have completed successfully, err as:", err)
+	}
+
+	// swap out the test mkBridge override temporarily, just to make sure we get
+	// the right error
+	mkBridge = func(s *solver, sm SourceManager) sourceBridge {
+		return &bridge{
+			sm:     sm,
+			s:      s,
+			vlists: make(map[ProjectIdentifier][]Version),
+		}
+	}
+
+	// With the real bridge, the random RootDir cannot exist on disk.
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on nonexistent root")
+	} else if !strings.Contains(err.Error(), "could not read project root") {
+		t.Error("Prepare should have given error nonexistent project root dir, but gave:", err)
+	}
+
+	// Pointing it at a file should also be an err
+	params.RootDir = "solve_test.go"
+	_, err = Prepare(params, sm)
+	if err == nil {
+		t.Errorf("Should have errored on file for RootDir")
+	} else if !strings.Contains(err.Error(), "is a file, not a directory") {
+		t.Error("Prepare should have given error on file as RootDir, but gave:", err)
+	}
+
+	// swap them back...not sure if this matters, but just in case
+	overrideMkBridge()
+}
diff --git a/vendor/github.com/sdboyer/gps/solver.go b/vendor/github.com/sdboyer/gps/solver.go
new file mode 100644
index 0000000..5556589
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/solver.go
@@ -0,0 +1,1169 @@
+package gps
+
+import (
+	"container/heap"
+	"fmt"
+	"log"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/armon/go-radix"
+)
+
+// rootRev is the sentinel (empty) Revision used as the root project's version,
+// since the root atom must not carry a nil version (see selectRoot).
+var rootRev = Revision("")
+
+// SolveParameters hold all arguments to a solver run.
+//
+// Only RootDir and RootPackageTree (with a non-empty ImportRoot and at least
+// one package) are absolutely required. A nil Manifest is allowed, though it
+// usually makes little sense.
+//
+// Of these properties, only Manifest and Ignore are (directly) incorporated in
+// memoization hashing.
+type SolveParameters struct {
+	// The path to the root of the project on which the solver should operate.
+	// This should point to the directory that should contain the vendor/
+	// directory.
+	//
+	// In general, it is wise for this to be under an active GOPATH, though it
+	// is not (currently) required.
+	//
+	// A real path to a readable directory is required.
+	RootDir string
+
+	// The tree of packages that comprise the root project, as well as the
+	// import path that should identify the root of that tree.
+	//
+	// In most situations, tools should simply pass the result of ListPackages()
+	// directly through here.
+	//
+	// The ImportRoot property must be a non-empty string, and at least one
+	// element must be present in the Packages map.
+	RootPackageTree PackageTree
+
+	// The root manifest. This contains all the dependency constraints
+	// associated with normal Manifests, as well as the particular controls
+	// afforded only to the root project.
+	//
+	// May be nil, but for most cases, that would be unwise.
+	Manifest RootManifest
+
+	// The root lock. Optional. Generally, this lock is the output of a previous
+	// solve run.
+	//
+	// If provided, the solver will attempt to preserve the versions specified
+	// in the lock, unless ToChange or ChangeAll settings indicate otherwise.
+	Lock Lock
+
+	// ToChange is a list of project names that should be changed - that is, any
+	// versions specified for those projects in the root lock file should be
+	// ignored.
+	//
+	// Passing ChangeAll has subtly different behavior from enumerating all
+	// projects into ToChange. In general, ToChange should *only* be used if the
+	// user expressly requested an upgrade for a specific project.
+	ToChange []ProjectRoot
+
+	// ChangeAll indicates that all projects should be changed - that is, any
+	// versions specified in the root lock file should be ignored.
+	ChangeAll bool
+
+	// Downgrade indicates whether the solver will attempt to upgrade (false) or
+	// downgrade (true) projects that are not locked, or are marked for change.
+	//
+	// Upgrading is, by far, the most typical case. The field is named
+	// 'Downgrade' so that the bool's zero value corresponds to that most
+	// typical case.
+	Downgrade bool
+
+	// Trace controls whether the solver will generate informative trace output
+	// as it moves through the solving process.
+	Trace bool
+
+	// TraceLogger is the logger to use for generating trace output. If Trace is
+	// true but no logger is provided, solving will result in an error.
+	TraceLogger *log.Logger
+}
+
+// solver is a CDCL-style constraint solver with satisfiability conditions
+// hardcoded to the needs of the Go package management problem space.
+type solver struct {
+	// The current number of attempts made over the course of this solve. This
+	// number increments each time the algorithm completes a backtrack and
+	// starts moving forward again.
+	attempts int
+
+	// SolveParameters are the inputs to the solver. They determine both what
+	// data the solver should operate on, and certain aspects of how solving
+	// proceeds.
+	//
+	// Prepare() validates these, so by the time we have a *solver instance, we
+	// know they're valid.
+	params SolveParameters
+
+	// Logger used exclusively for trace output, if the trace option is set.
+	tl *log.Logger
+
+	// A bridge to the standard SourceManager. The adapter does some local
+	// caching of pre-sorted version lists, as well as translation between the
+	// full-on ProjectIdentifiers that the solver deals with and the simplified
+	// names a SourceManager operates on.
+	b sourceBridge
+
+	// A stack containing projects and packages that are currently "selected" -
+	// that is, they have passed all satisfiability checks, and are part of the
+	// current solution.
+	//
+	// The *selection type is mostly just a dumb data container; the solver
+	// itself is responsible for maintaining that invariant.
+	sel *selection
+
+	// The current list of projects that we need to incorporate into the solution in
+	// order for the solution to be complete. This list is implemented as a
+	// priority queue that places projects least likely to induce errors at the
+	// front, in order to minimize the amount of backtracking required to find a
+	// solution.
+	//
+	// Entries are added to and removed from this list by the solver at the same
+	// time that the selected queue is updated, either with an addition or
+	// removal.
+	unsel *unselected
+
+	// Map of packages to ignore. Populated from the root manifest's
+	// IgnorePackages() during solver prep - which also, nicely, deduplicates
+	// it by virtue of being a map.
+	ig map[string]bool
+
+	// A stack of all the currently active versionQueues in the solver. The set
+	// of projects represented here corresponds closely to what's in s.sel,
+	// although s.sel will always contain the root project, and s.vqs never
+	// will. Also, s.vqs is only added to (or popped from during backtracking)
+	// when a new project is selected; it is untouched when new packages are
+	// added to an existing project.
+	vqs []*versionQueue
+
+	// A map of the ProjectRoot (local names) that should be allowed to change
+	chng map[ProjectRoot]struct{}
+
+	// A ProjectConstraints map containing the validated (guaranteed non-empty)
+	// overrides declared by the root manifest.
+	ovr ProjectConstraints
+
+	// A map of the project names listed in the root's lock.
+	rlm map[ProjectRoot]LockedProject
+
+	// A defensively-copied instance of the root manifest.
+	rm Manifest
+
+	// A defensively-copied instance of the root lock.
+	rl Lock
+
+	// A defensively-copied instance of params.RootPackageTree
+	rpt PackageTree
+}
+
+// A Solver is the main workhorse of gps: given a set of project inputs, it
+// performs a constraint solving analysis to develop a complete Solution, or
+// else fail with an informative error.
+//
+// If a Solution is found, an implementing tool may persist it - typically into
+// a "lock file" - and/or use it to write out a directory tree of dependencies,
+// suitable to be a vendor directory, via CreateVendorTree.
+//
+// Obtain a Solver via Prepare(), which validates inputs before construction.
+type Solver interface {
+	// HashInputs produces a hash digest representing the unique inputs to this
+	// solver. It is guaranteed that, if the hash digest is equal to the digest
+	// from a previous Solution.InputHash(), that that Solution is valid for
+	// this Solver's inputs.
+	//
+	// In such a case, it may not be necessary to run Solve() at all.
+	HashInputs() []byte
+
+	// Solve initiates a solving run. It will either complete successfully with
+	// a Solution, or fail with an informative error.
+	Solve() (Solution, error)
+}
+
+// Prepare readies a Solver for use.
+//
+// This function reads and validates the provided SolveParameters. If a problem
+// with the inputs is detected, an error is returned. Otherwise, a Solver is
+// returned, ready to hash and check inputs or perform a solving run.
+//
+// All returned validation errors are badOptsFailure values; the only other
+// error source is the bridge's root-directory verification.
+func Prepare(params SolveParameters, sm SourceManager) (Solver, error) {
+	if sm == nil {
+		return nil, badOptsFailure("must provide non-nil SourceManager")
+	}
+	if params.RootDir == "" {
+		return nil, badOptsFailure("params must specify a non-empty root directory")
+	}
+	if params.RootPackageTree.ImportRoot == "" {
+		return nil, badOptsFailure("params must include a non-empty import root")
+	}
+	if len(params.RootPackageTree.Packages) == 0 {
+		return nil, badOptsFailure("at least one package must be present in the PackageTree")
+	}
+	if params.Trace && params.TraceLogger == nil {
+		return nil, badOptsFailure("trace requested, but no logger provided")
+	}
+
+	// Normalize a nil manifest to an empty one so downstream code can assume
+	// the manifest is always usable.
+	if params.Manifest == nil {
+		params.Manifest = simpleRootManifest{}
+	}
+
+	s := &solver{
+		params: params,
+		ig:     params.Manifest.IgnorePackages(),
+		ovr:    params.Manifest.Overrides(),
+		tl:     params.TraceLogger,
+		rpt:    params.RootPackageTree.dup(),
+	}
+
+	// Ensure the ignore and overrides maps are at least initialized
+	if s.ig == nil {
+		s.ig = make(map[string]bool)
+	}
+	if s.ovr == nil {
+		s.ovr = make(ProjectConstraints)
+	}
+
+	// Validate no empties in the overrides map
+	var eovr []string
+	for pr, pp := range s.ovr {
+		if pp.Constraint == nil && pp.NetworkName == "" {
+			eovr = append(eovr, string(pr))
+		}
+	}
+
+	if eovr != nil {
+		// Maybe it's a little nitpicky to do this (we COULD proceed; empty
+		// overrides have no effect), but this errs on the side of letting the
+		// tool/user know there's bad input. Purely as a principle, that seems
+		// preferable to silently allowing progress with icky input.
+		if len(eovr) > 1 {
+			return nil, badOptsFailure(fmt.Sprintf("Overrides lacked any non-zero properties for multiple project roots: %s", strings.Join(eovr, " ")))
+		}
+		return nil, badOptsFailure(fmt.Sprintf("An override was declared for %s, but without any non-zero properties", eovr[0]))
+	}
+
+	// Set up the bridge and ensure the root dir is in good, working order
+	// before doing anything else. (This call is stubbed out in tests, via
+	// overriding mkBridge(), so we can run with virtual RootDir.)
+	s.b = mkBridge(s, sm)
+	err := s.b.verifyRootDir(s.params.RootDir)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize maps
+	s.chng = make(map[ProjectRoot]struct{})
+	s.rlm = make(map[ProjectRoot]LockedProject)
+
+	for _, v := range s.params.ToChange {
+		s.chng[v] = struct{}{}
+	}
+
+	// Initialize stacks and queues
+	s.sel = &selection{
+		deps: make(map[ProjectRoot][]dependency),
+		sm:   s.b,
+	}
+	s.unsel = &unselected{
+		sl:  make([]bimodalIdentifier, 0),
+		cmp: s.unselectedComparator,
+	}
+
+	// Prep safe, normalized versions of root manifest and lock data
+	s.rm = prepManifest(s.params.Manifest)
+	if s.params.Lock != nil {
+		for _, lp := range s.params.Lock.Projects() {
+			s.rlm[lp.Ident().ProjectRoot] = lp
+		}
+
+		// Also keep a prepped one, mostly for the bridge. This is probably
+		// wasteful, but only minimally so, and yay symmetry
+		s.rl = prepLock(s.params.Lock)
+	}
+
+	return s, nil
+}
+
+// Solve attempts to find a dependency solution for the given project, as
+// represented by the SolveParameters with which this Solver was created.
+//
+// This is the entry point to the main gps workhorse.
+//
+// On success, the returned Solution carries the attempt count, the input
+// hash, and the selected projects converted to LockedProjects.
+func (s *solver) Solve() (Solution, error) {
+	// Prime the queues with the root project
+	err := s.selectRoot()
+	if err != nil {
+		return nil, err
+	}
+
+	// Run the main solving loop; "all" maps each selected atom to the set of
+	// its packages that are actually used.
+	all, err := s.solve()
+
+	var soln solution
+	if err == nil {
+		soln = solution{
+			att: s.attempts,
+		}
+
+		soln.hd = s.HashInputs()
+
+		// Convert ProjectAtoms into LockedProjects
+		soln.p = make([]LockedProject, len(all))
+		k := 0
+		for pa, pl := range all {
+			soln.p[k] = pa2lp(pa, pl)
+			k++
+		}
+	}
+
+	// Emit trace output for the final outcome regardless of success/failure.
+	s.traceFinish(soln, err)
+	return soln, err
+}
+
+// solve is the top-level loop for the solving process.
+//
+// It returns a map from each selected (non-root) atom to the set of package
+// import paths used from that atom, or an error if no solution could be found
+// even after exhausting backtracking.
+func (s *solver) solve() (map[atom]map[string]struct{}, error) {
+	// Main solving loop
+	for {
+		bmi, has := s.nextUnselected()
+
+		if !has {
+			// no more packages to select - we're done.
+			break
+		}
+
+		// This split is the heart of "bimodal solving": we follow different
+		// satisfiability and selection paths depending on whether we've already
+		// selected the base project/repo that came off the unselected queue.
+		//
+		// (If we've already selected the project, other parts of the algorithm
+		// guarantee the bmi will contain at least one package from this project
+		// that has yet to be selected.)
+		if awp, is := s.sel.selected(bmi.id); !is {
+			// Analysis path for when we haven't selected the project yet - need
+			// to create a version queue.
+			queue, err := s.createVersionQueue(bmi)
+			if err != nil {
+				// Err means a failure somewhere down the line; try backtracking.
+				s.traceStartBacktrack(bmi, err, false)
+				//s.traceBacktrack(bmi, false)
+				if s.backtrack() {
+					// backtracking succeeded, move to the next unselected id
+					continue
+				}
+				return nil, err
+			}
+
+			if queue.current() == nil {
+				panic("canary - queue is empty, but flow indicates success")
+			}
+
+			awp := atomWithPackages{
+				a: atom{
+					id: queue.id,
+					v:  queue.current(),
+				},
+				pl: bmi.pl,
+			}
+			s.selectAtom(awp, false)
+			s.vqs = append(s.vqs, queue)
+		} else {
+			// We're just trying to add packages to an already-selected project.
+			// That means it's not OK to burn through the version queue for that
+			// project as we do when first selecting a project, as doing so
+			// would upend the guarantees on which all previous selections of
+			// the project are based (both the initial one, and any package-only
+			// ones).
+
+			// Because we can only safely operate within the scope of the
+			// single, currently selected version, we can skip looking for the
+			// queue and just use the version given in what came back from
+			// s.sel.selected().
+			nawp := atomWithPackages{
+				a: atom{
+					id: bmi.id,
+					v:  awp.a.v,
+				},
+				pl: bmi.pl,
+			}
+
+			s.traceCheckPkgs(bmi)
+			err := s.check(nawp, true)
+			if err != nil {
+				// Err means a failure somewhere down the line; try backtracking.
+				s.traceStartBacktrack(bmi, err, true)
+				if s.backtrack() {
+					// backtracking succeeded, move to the next unselected id
+					continue
+				}
+				return nil, err
+			}
+			s.selectAtom(nawp, true)
+			// We don't add anything to the stack of version queues because the
+			// backtracker knows not to pop the vqstack if it backtracks
+			// across a pure-package addition.
+		}
+	}
+
+	// Getting this far means we successfully found a solution. Combine the
+	// selected projects and packages.
+	projs := make(map[atom]map[string]struct{})
+
+	// Skip the first project. It's always the root, and that shouldn't be
+	// included in results.
+	for _, sel := range s.sel.projects[1:] {
+		pm, exists := projs[sel.a.a]
+		if !exists {
+			pm = make(map[string]struct{})
+			projs[sel.a.a] = pm
+		}
+
+		for _, path := range sel.a.pl {
+			pm[path] = struct{}{}
+		}
+	}
+	return projs, nil
+}
+
+// selectRoot is a specialized selectAtom, used solely to initially
+// populate the queues at the beginning of a solve run.
+//
+// It pushes the root atom onto the selection stack, then derives the root's
+// dependencies from the manifest and the root package tree's import reach,
+// pushing each onto the unselected queue.
+func (s *solver) selectRoot() error {
+	pa := atom{
+		id: ProjectIdentifier{
+			ProjectRoot: ProjectRoot(s.rpt.ImportRoot),
+		},
+		// This is a hack so that the root project doesn't have a nil version.
+		// It's sort of OK because the root never makes it out into the results.
+		// We may need a more elegant solution if we discover other side
+		// effects, though.
+		v: rootRev,
+	}
+
+	// Collect a sorted package list for the root atom.
+	// NOTE(review): this keeps only packages whose Err is non-nil, i.e. only
+	// the ones that FAILED to parse. That looks inverted (expected
+	// pkg.Err == nil) - confirm against upstream gps before relying on it.
+	list := make([]string, len(s.rpt.Packages))
+	k := 0
+	for path, pkg := range s.rpt.Packages {
+		if pkg.Err != nil {
+			list[k] = path
+			k++
+		}
+	}
+	list = list[:k]
+	sort.Strings(list)
+
+	a := atomWithPackages{
+		a:  pa,
+		pl: list,
+	}
+
+	// Push the root project onto the queue.
+	// TODO(sdboyer) maybe it'd just be better to skip this?
+	s.sel.pushSelection(a, true)
+
+	// If we're looking for root's deps, get it from opts and local root
+	// analysis, rather than having the sm do it
+	c, tc := s.rm.DependencyConstraints(), s.rm.TestDependencyConstraints()
+	mdeps := s.ovr.overrideAll(pcSliceToMap(c, tc).asSortedSlice())
+
+	// Err is not possible at this point, as it could only come from
+	// listPackages(), which if we're here already succeeded for root
+	reach := s.rpt.ExternalReach(true, true, s.ig).ListExternalImports()
+
+	deps, err := s.intersectConstraintsWithImports(mdeps, reach)
+	if err != nil {
+		// TODO(sdboyer) this could well happen; handle it with a more graceful error
+		panic(fmt.Sprintf("shouldn't be possible %s", err))
+	}
+
+	for _, dep := range deps {
+		// If we have no lock, or if this dep isn't in the lock, then prefetch
+		// it in the background so source data is warm by the time the solver
+		// needs it.
+		if _, has := s.rlm[dep.Ident.ProjectRoot]; !has {
+			go s.b.SyncSourceFor(dep.Ident)
+		}
+
+		s.sel.pushDep(dependency{depender: pa, dep: dep})
+		// Add all to unselected queue
+		heap.Push(s.unsel, bimodalIdentifier{id: dep.Ident, pl: dep.pl, fromRoot: true})
+	}
+
+	s.traceSelectRoot(s.rpt, deps)
+	return nil
+}
+
+// getImportsAndConstraintsOf computes the complete dependencies induced by the
+// given non-root atom: the external packages reached from the atom's package
+// list, intersected with the constraints declared in the atom's manifest
+// (after applying root overrides).
+//
+// It must never be called for the root project; root deps are computed in
+// selectRoot() from local analysis instead.
+func (s *solver) getImportsAndConstraintsOf(a atomWithPackages) ([]completeDep, error) {
+	// NOTE(review): this var is redundant - err is (re)used via := below.
+	var err error
+
+	if ProjectRoot(s.rpt.ImportRoot) == a.a.id.ProjectRoot {
+		panic("Should never need to recheck imports/constraints from root during solve")
+	}
+
+	// Work through the source manager to get project info and static analysis
+	// information.
+	m, _, err := s.b.GetManifestAndLock(a.a.id, a.a.v)
+	if err != nil {
+		return nil, err
+	}
+
+	ptree, err := s.b.ListPackages(a.a.id, a.a.v)
+	if err != nil {
+		return nil, err
+	}
+
+	allex := ptree.ExternalReach(false, false, s.ig)
+	// Use a map to dedupe the unique external packages
+	exmap := make(map[string]struct{})
+	// Add to the list those packages that are reached by the packages
+	// explicitly listed in the atom
+	for _, pkg := range a.pl {
+		expkgs, exists := allex[pkg]
+		if !exists {
+			// missing package here *should* only happen if the target pkg was
+			// poisoned somehow - check the original ptree.
+			if perr, exists := ptree.Packages[pkg]; exists {
+				if perr.Err != nil {
+					return nil, fmt.Errorf("package %s has errors: %s", pkg, perr.Err)
+				}
+				return nil, fmt.Errorf("package %s depends on some other package within %s with errors", pkg, a.a.id.errString())
+			}
+			// Nope, it's actually not there. This shouldn't happen.
+			return nil, fmt.Errorf("package %s does not exist within project %s", pkg, a.a.id.errString())
+		}
+
+		for _, ex := range expkgs {
+			exmap[ex] = struct{}{}
+		}
+	}
+
+	// Flatten the deduped set into a sorted slice for deterministic output.
+	reach := make([]string, len(exmap))
+	k := 0
+	for pkg := range exmap {
+		reach[k] = pkg
+		k++
+	}
+	sort.Strings(reach)
+
+	deps := s.ovr.overrideAll(m.DependencyConstraints())
+	return s.intersectConstraintsWithImports(deps, reach)
+}
+
+// intersectConstraintsWithImports takes a list of constraints and a list of
+// externally reached packages, and creates a []completeDep that is guaranteed
+// to include all packages named by import reach, using constraints where they
+// are available, or Any() where they are not.
+//
+// Stdlib packages are skipped entirely; unmatched packages have their project
+// root deduced via the source bridge.
+func (s *solver) intersectConstraintsWithImports(deps []workingConstraint, reach []string) ([]completeDep, error) {
+	// Create a radix tree with all the projects we know from the manifest
+	// TODO(sdboyer) make this smarter once we allow non-root inputs as 'projects'
+	xt := radix.New()
+	for _, dep := range deps {
+		xt.Insert(string(dep.Ident.ProjectRoot), dep)
+	}
+
+	// Step through the reached packages; if they have prefix matches in
+	// the trie, assume (mostly) it's a correct correspondence.
+	dmap := make(map[ProjectRoot]completeDep)
+	for _, rp := range reach {
+		// If it's a stdlib package, skip it.
+		// TODO(sdboyer) this just hardcodes us to the packages in tip - should we
+		// have go version magic here, too?
+		if stdlib[rp] {
+			continue
+		}
+
+		// Look for a prefix match; it'll be the root project/repo containing
+		// the reached package
+		if pre, idep, match := xt.LongestPrefix(rp); match {
+			// isPathPrefixOrEqual guards against false positives like
+			// "foo/bar" matching prefix "foo/ba".
+			if isPathPrefixOrEqual(pre, rp) {
+				// Match is valid; put it in the dmap, either creating a new
+				// completeDep or appending it to the existing one for this base
+				// project/prefix.
+				dep := idep.(workingConstraint)
+				if cdep, exists := dmap[dep.Ident.ProjectRoot]; exists {
+					cdep.pl = append(cdep.pl, rp)
+					dmap[dep.Ident.ProjectRoot] = cdep
+				} else {
+					dmap[dep.Ident.ProjectRoot] = completeDep{
+						workingConstraint: dep,
+						pl:                []string{rp},
+					}
+				}
+				continue
+			}
+		}
+
+		// No match. Let the SourceManager try to figure out the root
+		root, err := s.b.DeduceProjectRoot(rp)
+		if err != nil {
+			// Nothing we can do if we can't suss out a root
+			return nil, err
+		}
+
+		// Make a new completeDep with an open constraint, respecting overrides
+		pd := s.ovr.override(ProjectConstraint{
+			Ident: ProjectIdentifier{
+				ProjectRoot: root,
+			},
+			Constraint: Any(),
+		})
+
+		// Insert the pd into the trie so that further deps from this
+		// project get caught by the prefix search
+		xt.Insert(string(root), pd)
+		// And also put the complete dep into the dmap
+		dmap[root] = completeDep{
+			workingConstraint: pd,
+			pl:                []string{rp},
+		}
+	}
+
+	// Dump all the deps from the map into the expected return slice
+	cdeps := make([]completeDep, len(dmap))
+	k := 0
+	for _, cdep := range dmap {
+		cdeps[k] = cdep
+		k++
+	}
+
+	return cdeps, nil
+}
+
+// createVersionQueue builds a versionQueue for the project named by the bmi,
+// seeding it with the locked version (if valid) and any preferred version,
+// then advances it to the first version satisfying current constraints.
+//
+// Returns an error if the project cannot be located at all, or if no version
+// in the queue satisfies the constraints.
+func (s *solver) createVersionQueue(bmi bimodalIdentifier) (*versionQueue, error) {
+	id := bmi.id
+	// If on the root package, there's no queue to make
+	if ProjectRoot(s.rpt.ImportRoot) == id.ProjectRoot {
+		return newVersionQueue(id, nil, nil, s.b)
+	}
+
+	exists, err := s.b.SourceExists(id)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		exists, err = s.b.vendorCodeExists(id)
+		if err != nil {
+			return nil, err
+		}
+		if exists {
+			// Project exists only in vendor (and in some manifest somewhere)
+			// TODO(sdboyer) mark this for special handling, somehow?
+		} else {
+			return nil, fmt.Errorf("Project '%s' could not be located.", id)
+		}
+	}
+
+	var lockv Version
+	if len(s.rlm) > 0 {
+		lockv, err = s.getLockVersionIfValid(id)
+		if err != nil {
+			// Can only get an error here if an upgrade was expressly requested on
+			// code that exists only in vendor
+			return nil, err
+		}
+	}
+
+	var prefv Version
+	if bmi.fromRoot {
+		// If this bmi came from the root, then we want to search through things
+		// with a dependency on it in order to see if any have a lock that might
+		// express a prefv
+		//
+		// TODO(sdboyer) nested loop; prime candidate for a cache somewhere
+		for _, dep := range s.sel.getDependenciesOn(bmi.id) {
+			// Skip the root, of course
+			if ProjectRoot(s.rpt.ImportRoot) == dep.depender.id.ProjectRoot {
+				continue
+			}
+
+			_, l, err := s.b.GetManifestAndLock(dep.depender.id, dep.depender.v)
+			if err != nil || l == nil {
+				// err being non-nil really shouldn't be possible, but the lock
+				// being nil is quite likely
+				continue
+			}
+
+			// Last matching lock entry wins if several dependers lock this id.
+			for _, lp := range l.Projects() {
+				if lp.Ident().eq(bmi.id) {
+					prefv = lp.Version()
+				}
+			}
+		}
+
+		// OTHER APPROACH - WRONG, BUT MAYBE USEFUL FOR REFERENCE?
+		// If this bmi came from the root, then we want to search the unselected
+		// queue to see if anything *else* wants this ident, in which case we
+		// pick up that prefv
+		//for _, bmi2 := range s.unsel.sl {
+		//// Take the first thing from the queue that's for the same ident,
+		//// and has a non-nil prefv
+		//if bmi.id.eq(bmi2.id) {
+		//if bmi2.prefv != nil {
+		//prefv = bmi2.prefv
+		//}
+		//}
+		//}
+
+	} else {
+		// Otherwise, just use the preferred version expressed in the bmi
+		prefv = bmi.prefv
+	}
+
+	q, err := newVersionQueue(id, lockv, prefv, s.b)
+	if err != nil {
+		// TODO(sdboyer) this particular err case needs to be improved to be ONLY for cases
+		// where there's absolutely nothing findable about a given project name
+		return nil, err
+	}
+
+	// Hack in support for revisions.
+	//
+	// By design, revs aren't returned from ListVersion(). Thus, if the dep in
+	// the bmi was has a rev constraint, it is (almost) guaranteed to fail, even
+	// if that rev does exist in the repo. So, detect a rev and push it into the
+	// vq here, instead.
+	//
+	// Happily, the solver maintains the invariant that constraints on a given
+	// ident cannot be incompatible, so we know that if we find one rev, then
+	// any other deps will have to also be on that rev (or Any).
+	//
+	// TODO(sdboyer) while this does work, it bypasses the interface-implied guarantees
+	// of the version queue, and is therefore not a great strategy for API
+	// coherency. Folding this in to a formal interface would be better.
+	switch tc := s.sel.getConstraint(bmi.id).(type) {
+	case Revision:
+		// We know this is the only thing that could possibly match, so put it
+		// in at the front - if it isn't there already.
+		// NOTE(review): q.pi[0] panics if the queue's version list is empty -
+		// presumably newVersionQueue guarantees non-empty on success; confirm.
+		if q.pi[0] != tc {
+			// Existence of the revision is guaranteed by checkRevisionExists().
+			q.pi = append([]Version{tc}, q.pi...)
+		}
+	}
+
+	// Having assembled the queue, search it for a valid version.
+	s.traceCheckQueue(q, bmi, false, 1)
+	return q, s.findValidVersion(q, bmi.pl)
+}
+
+// findValidVersion walks through a versionQueue until it finds a version that
+// satisfies the constraints held in the current state of the solver.
+//
+// The satisfiability checks triggered from here are constrained to operate only
+// on those dependencies induced by the list of packages given in the second
+// parameter.
+//
+// On exhaustion, a noVersionError is returned carrying only the failures newly
+// accumulated during this call.
+func (s *solver) findValidVersion(q *versionQueue, pl []string) error {
+	if nil == q.current() {
+		// this case should not be reachable, but reflects improper solver state
+		// if it is, so panic immediately
+		panic("version queue is empty, should not happen")
+	}
+
+	// Remember how many failures the queue already carried, so the error below
+	// can report only the ones added by this attempt.
+	faillen := len(q.fails)
+
+	for {
+		cur := q.current()
+		s.traceInfo("try %s@%s", q.id.errString(), cur)
+		err := s.check(atomWithPackages{
+			a: atom{
+				id: q.id,
+				v:  cur,
+			},
+			pl: pl,
+		}, false)
+		if err == nil {
+			// we have a good version, can return safely
+			return nil
+		}
+
+		if q.advance(err) != nil {
+			// Error on advance, have to bail out
+			break
+		}
+		if q.isExhausted() {
+			// Queue is empty, bail with error
+			break
+		}
+	}
+
+	// Mark the first depender as failed, to steer future ordering.
+	// NOTE(review): assumes at least one dependency on q.id exists; [0] would
+	// panic otherwise - presumably guaranteed by how queues are created.
+	s.fail(s.sel.getDependenciesOn(q.id)[0].depender.id)
+
+	// Return a compound error of all the new errors encountered during this
+	// attempt to find a new, valid version
+	return &noVersionError{
+		pn:    q.id,
+		fails: q.fails[faillen:],
+	}
+}
+
// getLockVersionIfValid finds an atom for the given ProjectIdentifier from the
// root lock, assuming:
//
// 1. A root lock was provided
// 2. The general flag to change all projects was not passed
// 3. A flag to change this particular ProjectIdentifier was not passed
//
// If any of these three conditions are true (or if the id cannot be found in
// the root lock), then no atom will be returned.
func (s *solver) getLockVersionIfValid(id ProjectIdentifier) (Version, error) {
	// If the project is specifically marked for changes, then don't look for a
	// locked version.
	if _, explicit := s.chng[id.ProjectRoot]; explicit || s.params.ChangeAll {
		// For projects with an upstream or cache repository, it's safe to
		// ignore what's in the lock, because there's presumably more versions
		// to be found and attempted in the repository. If it's only in vendor,
		// though, then we have to try to use what's in the lock, because that's
		// the only version we'll be able to get.
		if exist, _ := s.b.SourceExists(id); exist {
			// Upgrades mean breaking the lock
			s.b.breakLock()
			return nil, nil
		}

		// However, if a change was *expressly* requested for something that
		// exists only in vendor, then that guarantees we don't have enough
		// information to complete a solution. In that case, error out.
		if explicit {
			return nil, &missingSourceFailure{
				goal: id,
				prob: "Cannot upgrade %s, as no source repository could be found.",
			}
		}
	}

	// Not present in the root lock means there is nothing to preserve.
	lp, exists := s.rlm[id.ProjectRoot]
	if !exists {
		return nil, nil
	}

	constraint := s.sel.getConstraint(id)
	v := lp.Version()
	if !constraint.Matches(v) {
		var found bool
		if tv, ok := v.(Revision); ok {
			// If we only have a revision from the root's lock, allow matching
			// against other versions that have that revision
			for _, pv := range s.b.pairRevision(id, tv) {
				if constraint.Matches(pv) {
					v = pv
					found = true
					break
				}
			}
			//} else if _, ok := constraint.(Revision); ok {
			//// If the current constraint is itself a revision, and the lock gave
			//// an unpaired version, see if they match up
			////
			//if u, ok := v.(UnpairedVersion); ok {
			//pv := s.sm.pairVersion(id, u)
			//if constraint.Matches(pv) {
			//v = pv
			//found = true
			//}
			//}
		}

		if !found {
			// No match found, which means we're going to be breaking the lock
			s.b.breakLock()
			return nil, nil
		}
	}

	return v, nil
}
+
// backtrack works backwards from the current failed solution to find the next
// solution to try.
//
// It returns true when it lands on a fresh, valid version to resume solving
// from, and false when the version queues are exhausted with no alternatives.
func (s *solver) backtrack() bool {
	if len(s.vqs) == 0 {
		// nothing to backtrack to
		return false
	}

	for {
		// Inner loop: unwind queues and selections until reaching the most
		// recent project whose version queue was flagged as failed.
		for {
			if len(s.vqs) == 0 {
				// no more versions, nowhere further to backtrack
				return false
			}
			if s.vqs[len(s.vqs)-1].failed {
				break
			}

			// GC-friendly pop of the last (non-failed) queue.
			s.vqs, s.vqs[len(s.vqs)-1] = s.vqs[:len(s.vqs)-1], nil

			// Pop selections off until we get to a project.
			var proj bool
			var awp atomWithPackages
			for !proj {
				awp, proj = s.unselectLast()
				s.traceBacktrack(awp.bmi(), !proj)
			}
		}

		// Grab the last versionQueue off the list of queues
		q := s.vqs[len(s.vqs)-1]

		// Walk back to the next project
		awp, proj := s.unselectLast()
		if !proj {
			panic("canary - *should* be impossible to have a pkg-only selection here")
		}

		if !q.id.eq(awp.a.id) {
			panic("canary - version queue stack and selected project stack are misaligned")
		}

		// Advance the queue past the current version, which we know is bad
		// TODO(sdboyer) is it feasible to make available the failure reason here?
		if q.advance(nil) == nil && !q.isExhausted() {
			// Search for another acceptable version of this failed dep in its queue
			s.traceCheckQueue(q, awp.bmi(), true, 0)
			if s.findValidVersion(q, awp.pl) == nil {
				// Found one! Put it back on the selected queue and stop
				// backtracking

				// reusing the old awp is fine
				awp.a.v = q.current()
				s.selectAtom(awp, false)
				break
			}
		}

		s.traceBacktrack(awp.bmi(), false)
		//s.traceInfo("no more versions of %s, backtracking", q.id.errString())

		// No solution found; continue backtracking after popping the queue
		// we just inspected off the list
		// GC-friendly pop pointer elem in slice
		s.vqs, s.vqs[len(s.vqs)-1] = s.vqs[:len(s.vqs)-1], nil
	}

	// Backtracking was successful if loop ended before running out of versions
	if len(s.vqs) == 0 {
		return false
	}
	s.attempts++
	return true
}
+
+func (s *solver) nextUnselected() (bimodalIdentifier, bool) {
+	if len(s.unsel.sl) > 0 {
+		return s.unsel.sl[0], true
+	}
+
+	return bimodalIdentifier{}, false
+}
+
// unselectedComparator is the less-func backing the unselected priority queue.
// It orders items so that those likely to fail fast (or to be cheap to try)
// are visited first.
func (s *solver) unselectedComparator(i, j int) bool {
	ibmi, jbmi := s.unsel.sl[i], s.unsel.sl[j]
	iname, jname := ibmi.id, jbmi.id

	// Most important thing is pushing package additions ahead of project
	// additions. Package additions can't walk their version queue, so all they
	// do is narrow the possibility of success; better to find out early and
	// fast if they're going to fail than wait until after we've done real work
	// on a project and have to backtrack across it.

	// FIXME the impl here is currently O(n) in the number of selections; it
	// absolutely cannot stay in a hot sorting path like this
	// FIXME while other solver invariants probably protect us from it, this
	// call-out means that it's possible for external state change to invalidate
	// heap invariants.
	_, isel := s.sel.selected(iname)
	_, jsel := s.sel.selected(jname)

	// Already-selected means the bmi is a package-only addition; those sort
	// ahead of everything else.
	if isel && !jsel {
		return true
	}
	if !isel && jsel {
		return false
	}

	if iname.eq(jname) {
		return false
	}

	// Next preference: projects pinned by the root lock, which avoid the
	// version listing (and its possible network cost) below.
	_, ilock := s.rlm[iname.ProjectRoot]
	_, jlock := s.rlm[jname.ProjectRoot]

	switch {
	case ilock && !jlock:
		return true
	case !ilock && jlock:
		return false
	case ilock && jlock:
		return iname.less(jname)
	}

	// Now, sort by number of available versions. This will trigger network
	// activity, but at this point we know that the project we're looking at
	// isn't locked by the root. And, because being locked by root is the only
	// way avoid that call when making a version queue, we know we're gonna have
	// to pay that cost anyway.

	// We can safely ignore an err from ListVersions here because, if there is
	// an actual problem, it'll be noted and handled somewhere else saner in the
	// solving algorithm.
	ivl, _ := s.b.ListVersions(iname)
	jvl, _ := s.b.ListVersions(jname)
	iv, jv := len(ivl), len(jvl)

	// Packages with fewer versions to pick from are less likely to benefit from
	// backtracking, so deal with them earlier in order to minimize the amount
	// of superfluous backtracking through them we do.
	switch {
	case iv == 0 && jv != 0:
		return true
	case iv != 0 && jv == 0:
		return false
	case iv != jv:
		return iv < jv
	}

	// Finally, if all else fails, fall back to comparing by name
	return iname.less(jname)
}
+
+func (s *solver) fail(id ProjectIdentifier) {
+	// TODO(sdboyer) does this need updating, now that we have non-project package
+	// selection?
+
+	// skip if the root project
+	if ProjectRoot(s.rpt.ImportRoot) != id.ProjectRoot {
+		// just look for the first (oldest) one; the backtracker will necessarily
+		// traverse through and pop off any earlier ones
+		for _, vq := range s.vqs {
+			if vq.id.eq(id) {
+				vq.failed = true
+				return
+			}
+		}
+	}
+}
+
// selectAtom pulls an atom into the selection stack, alongside some of
// its contained packages. New resultant dependency requirements are added to
// the unselected priority queue.
//
// Behavior is slightly diffferent if pkgonly is true.
func (s *solver) selectAtom(a atomWithPackages, pkgonly bool) {
	// The atom is no longer unselected; drop it from the priority queue.
	s.unsel.remove(bimodalIdentifier{
		id: a.a.id,
		pl: a.pl,
	})

	s.sel.pushSelection(a, pkgonly)

	deps, err := s.getImportsAndConstraintsOf(a)
	if err != nil {
		// This shouldn't be possible; other checks should have ensured all
		// packages and deps are present for any argument passed to this method.
		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
	}

	// If this atom has a lock, pull it out so that we can potentially inject
	// preferred versions into any bmis we enqueue
	//
	// TODO(sdboyer) making this call here could be the first thing to trigger
	// network activity...maybe? if so, can we mitigate by deferring the work to
	// queue consumption time?
	_, l, _ := s.b.GetManifestAndLock(a.a.id, a.a.v)
	var lmap map[ProjectIdentifier]Version
	if l != nil {
		lmap = make(map[ProjectIdentifier]Version)
		for _, lp := range l.Projects() {
			lmap[lp.Ident()] = lp.Version()
		}
	}

	for _, dep := range deps {
		// If this is dep isn't in the lock, do some prefetching. (If it is, we
		// might be able to get away with zero network activity for it, so don't
		// prefetch). This provides an opportunity for some parallelism wins, on
		// two fronts:
		//
		// 1. Because this loop may have multiple deps in it, we could end up
		// simultaneously fetching both in the background while solving proceeds
		//
		// 2. Even if only one dep gets prefetched here, the worst case is that
		// that same dep comes out of the unselected queue next, and we gain a
		// few microseconds before blocking later. Best case, the dep doesn't
		// come up next, but some other dep comes up that wasn't prefetched, and
		// both fetches proceed in parallel.
		if _, has := s.rlm[dep.Ident.ProjectRoot]; !has {
			go s.b.SyncSourceFor(dep.Ident)
		}

		s.sel.pushDep(dependency{depender: a.a, dep: dep})
		// Go through all the packages introduced on this dep, selecting only
		// the ones where the only depper on them is what the previous line just
		// pushed in. Then, put those into the unselected queue.
		rpm := s.sel.getRequiredPackagesIn(dep.Ident)
		var newp []string
		for _, pkg := range dep.pl {
			// Just one means that the dep we're visiting is the sole importer.
			if rpm[pkg] == 1 {
				newp = append(newp, pkg)
			}
		}

		if len(newp) > 0 {
			bmi := bimodalIdentifier{
				id: dep.Ident,
				pl: newp,
				// This puts in a preferred version if one's in the map, else
				// drops in the zero value (nil)
				prefv: lmap[dep.Ident],
			}
			heap.Push(s.unsel, bmi)
		}
	}

	s.traceSelect(a, pkgonly)
}
+
+func (s *solver) unselectLast() (atomWithPackages, bool) {
+	awp, first := s.sel.popSelection()
+	heap.Push(s.unsel, bimodalIdentifier{id: awp.a.id, pl: awp.pl})
+
+	deps, err := s.getImportsAndConstraintsOf(awp)
+	if err != nil {
+		// This shouldn't be possible; other checks should have ensured all
+		// packages and deps are present for any argument passed to this method.
+		panic(fmt.Sprintf("canary - shouldn't be possible %s", err))
+	}
+
+	for _, dep := range deps {
+		s.sel.popDep(dep.Ident)
+
+		// if no parents/importers, remove from unselected queue
+		if s.sel.depperCount(dep.Ident) == 0 {
+			s.unsel.remove(bimodalIdentifier{id: dep.Ident, pl: dep.pl})
+		}
+	}
+
+	return awp, first
+}
+
+// simple (temporary?) helper just to convert atoms into locked projects
+func pa2lp(pa atom, pkgs map[string]struct{}) LockedProject {
+	lp := LockedProject{
+		pi: pa.id,
+	}
+
+	switch v := pa.v.(type) {
+	case UnpairedVersion:
+		lp.v = v
+	case Revision:
+		lp.r = v
+	case versionPair:
+		lp.v = v.v
+		lp.r = v.r
+	default:
+		panic("unreachable")
+	}
+
+	for pkg := range pkgs {
+		lp.pkgs = append(lp.pkgs, strings.TrimPrefix(pkg, string(pa.id.ProjectRoot)+string(os.PathSeparator)))
+	}
+	sort.Strings(lp.pkgs)
+
+	return lp
+}
diff --git a/vendor/github.com/sdboyer/gps/source.go b/vendor/github.com/sdboyer/gps/source.go
new file mode 100644
index 0000000..81cb3be
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/source.go
@@ -0,0 +1,381 @@
+package gps
+
+import (
+	"fmt"
+	"sync"
+)
+
// source is the internal interface for a single repository-backed project
// source; SourceMgr composes these operations into its public API.
type source interface {
	// syncLocal brings all local information about the source up to date.
	syncLocal() error
	// checkExistence queries (and memoizes) existence at the given level(s).
	checkExistence(sourceExistence) bool
	// exportVersionTo writes the tree at a version into the given directory.
	exportVersionTo(Version, string) error
	// getManifestAndLock derives manifest and lock data at a version.
	getManifestAndLock(ProjectRoot, Version) (Manifest, Lock, error)
	// listPackages parses the package tree at a version.
	listPackages(ProjectRoot, Version) (PackageTree, error)
	// listVersions enumerates the versions the source offers.
	listVersions() ([]Version, error)
	// revisionPresentIn reports whether the revision exists in the source.
	revisionPresentIn(Revision) (bool, error)
}
+
// sourceMetaCache holds metadata discovered about a source: manifest/lock
// info and package trees keyed by revision, plus the bidirectional mapping
// between unpaired versions and revisions.
type sourceMetaCache struct {
	//Version  string                   // TODO(sdboyer) use this
	infos  map[Revision]projectInfo       // manifest/lock data per revision
	ptrees map[Revision]PackageTree       // parsed package trees per revision
	vMap   map[UnpairedVersion]Revision   // unpaired version -> revision
	rMap   map[Revision][]UnpairedVersion // revision -> all known unpaired versions
	// TODO(sdboyer) mutexes. actually probably just one, b/c complexity
}
+
// projectInfo pairs the Manifest and Lock derived for a single revision of a
// project, for storage in the metadata cache.
type projectInfo struct {
	Manifest
	Lock
}
+
// existence tracks which existence levels have been checked for a source and
// which of those checks succeeded, so that searches need not be repeated.
type existence struct {
	// The existence levels for which a search/check has been performed
	s sourceExistence

	// The existence levels verified to be present through searching
	f sourceExistence
}
+
+func newMetaCache() *sourceMetaCache {
+	return &sourceMetaCache{
+		infos:  make(map[Revision]projectInfo),
+		ptrees: make(map[Revision]PackageTree),
+		vMap:   make(map[UnpairedVersion]Revision),
+		rMap:   make(map[Revision][]UnpairedVersion),
+	}
+}
+
// baseVCSSource holds the state and behavior common to the VCS-backed source
// types; concrete sources embed it and inject their own listVersions via
// lvfunc.
type baseVCSSource struct {
	// Object for the cache repository
	crepo *repo

	// Indicates the extent to which we have searched for, and verified, the
	// existence of the project/repo.
	ex existence

	// ProjectAnalyzer used to fulfill getManifestAndLock
	an ProjectAnalyzer

	// The project metadata cache. This is (or is intended to be) persisted to
	// disk, for reuse across solver runs.
	dc *sourceMetaCache

	// lvfunc allows the other vcs source types that embed this type to inject
	// their listVersions func into the baseSource, for use as needed.
	lvfunc func() (vlist []Version, err error)

	// lock to serialize access to syncLocal
	synclock sync.Mutex

	// Globalish flag indicating whether a "full" sync has been performed. Also
	// used as a one-way gate to ensure that the full syncing routine is never
	// run more than once on a given source instance.
	allsync bool

	// The error, if any, that occurred on syncLocal
	syncerr error

	// Whether the cache has the latest info on versions
	cvsync bool
}
+
+func (bs *baseVCSSource) getManifestAndLock(r ProjectRoot, v Version) (Manifest, Lock, error) {
+	if err := bs.ensureCacheExistence(); err != nil {
+		return nil, nil, err
+	}
+
+	rev, err := bs.toRevOrErr(v)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Return the info from the cache, if we already have it
+	if pi, exists := bs.dc.infos[rev]; exists {
+		return pi.Manifest, pi.Lock, nil
+	}
+
+	bs.crepo.mut.Lock()
+	if !bs.crepo.synced {
+		err = bs.crepo.r.Update()
+		if err != nil {
+			return nil, nil, fmt.Errorf("failed fetching latest updates with err: %s", err.Error())
+		}
+		bs.crepo.synced = true
+	}
+
+	// Always prefer a rev, if it's available
+	if pv, ok := v.(PairedVersion); ok {
+		err = bs.crepo.r.UpdateVersion(pv.Underlying().String())
+	} else {
+		err = bs.crepo.r.UpdateVersion(v.String())
+	}
+	bs.crepo.mut.Unlock()
+
+	if err != nil {
+		// TODO(sdboyer) More-er proper-er error
+		panic(fmt.Sprintf("canary - why is checkout/whatever failing: %s %s %s", bs.crepo.r.LocalPath(), v.String(), err))
+	}
+
+	bs.crepo.mut.RLock()
+	m, l, err := bs.an.DeriveManifestAndLock(bs.crepo.r.LocalPath(), r)
+	// TODO(sdboyer) cache results
+	bs.crepo.mut.RUnlock()
+
+	if err == nil {
+		if l != nil {
+			l = prepLock(l)
+		}
+
+		// If m is nil, prepManifest will provide an empty one.
+		pi := projectInfo{
+			Manifest: prepManifest(m),
+			Lock:     l,
+		}
+
+		bs.dc.infos[rev] = pi
+
+		return pi.Manifest, pi.Lock, nil
+	}
+
+	return nil, nil, err
+}
+
+// toRevision turns a Version into a Revision, if doing so is possible based on
+// the information contained in the version itself, or in the cache maps.
+func (dc *sourceMetaCache) toRevision(v Version) Revision {
+	switch t := v.(type) {
+	case Revision:
+		return t
+	case PairedVersion:
+		return t.Underlying()
+	case UnpairedVersion:
+		// This will return the empty rev (empty string) if we don't have a
+		// record of it. It's up to the caller to decide, for example, if
+		// it's appropriate to update the cache.
+		return dc.vMap[t]
+	default:
+		panic(fmt.Sprintf("Unknown version type %T", v))
+	}
+}
+
+// toUnpaired turns a Version into an UnpairedVersion, if doing so is possible
+// based on the information contained in the version itself, or in the cache
+// maps.
+//
+// If the input is a revision and multiple UnpairedVersions are associated with
+// it, whatever happens to be the first is returned.
+func (dc *sourceMetaCache) toUnpaired(v Version) UnpairedVersion {
+	switch t := v.(type) {
+	case UnpairedVersion:
+		return t
+	case PairedVersion:
+		return t.Unpair()
+	case Revision:
+		if upv, has := dc.rMap[t]; has && len(upv) > 0 {
+			return upv[0]
+		}
+		return nil
+	default:
+		panic(fmt.Sprintf("unknown version type %T", v))
+	}
+}
+
// revisionPresentIn reports whether the given revision exists in the source,
// consulting the metadata cache before falling back to the on-disk repo.
func (bs *baseVCSSource) revisionPresentIn(r Revision) (bool, error) {
	// First and fastest path is to check the data cache to see if the rev is
	// present. This could give us false positives, but the cases where that can
	// occur would require a type of cache staleness that seems *exceedingly*
	// unlikely to occur.
	if _, has := bs.dc.infos[r]; has {
		return true, nil
	} else if _, has := bs.dc.rMap[r]; has {
		return true, nil
	}

	// Cache miss; make sure the repo exists on disk before asking it.
	err := bs.ensureCacheExistence()
	if err != nil {
		return false, err
	}

	bs.crepo.mut.RLock()
	defer bs.crepo.mut.RUnlock()
	return bs.crepo.r.IsReference(string(r)), nil
}
+
// ensureCacheExistence guarantees an on-disk cache repository exists for the
// source, cloning it from upstream if needed. It errors if the project exists
// neither in the cache nor upstream.
func (bs *baseVCSSource) ensureCacheExistence() error {
	// Technically, methods could could attempt to return straight from the
	// metadata cache even if the repo cache doesn't exist on disk. But that
	// would allow weird state inconsistencies (cache exists, but no repo...how
	// does that even happen?) that it'd be better to just not allow so that we
	// don't have to think about it elsewhere
	if !bs.checkExistence(existsInCache) {
		if bs.checkExistence(existsUpstream) {
			bs.crepo.mut.Lock()
			err := bs.crepo.r.Get()
			bs.crepo.mut.Unlock()

			if err != nil {
				return fmt.Errorf("failed to create repository cache for %s with err:\n%s", bs.crepo.r.Remote(), err)
			}
			// A fresh clone is by definition fully synced with upstream.
			bs.crepo.synced = true
			bs.ex.s |= existsInCache
			bs.ex.f |= existsInCache
		} else {
			return fmt.Errorf("project %s does not exist upstream", bs.crepo.r.Remote())
		}
	}

	return nil
}
+
// checkExistence provides a direct method for querying existence levels of the
// source. It will only perform actual searching (local fs or over the network)
// if no previous attempt at that search has been made.
//
// Note that this may perform read-ish operations on the cache repo, and it
// takes a lock accordingly. This makes it unsafe to call from a segment where
// the cache repo mutex is already write-locked, as deadlock will occur.
func (bs *baseVCSSource) checkExistence(ex sourceExistence) bool {
	// Only search for levels that haven't already been searched (ex.s tracks
	// "searched", ex.f tracks "found").
	if bs.ex.s&ex != ex {
		if ex&existsInVendorRoot != 0 && bs.ex.s&existsInVendorRoot == 0 {
			panic("should now be implemented in bridge")
		}
		if ex&existsInCache != 0 && bs.ex.s&existsInCache == 0 {
			bs.crepo.mut.RLock()
			bs.ex.s |= existsInCache
			if bs.crepo.r.CheckLocal() {
				bs.ex.f |= existsInCache
			}
			bs.crepo.mut.RUnlock()
		}
		if ex&existsUpstream != 0 && bs.ex.s&existsUpstream == 0 {
			bs.crepo.mut.RLock()
			bs.ex.s |= existsUpstream
			if bs.crepo.r.Ping() {
				bs.ex.f |= existsUpstream
			}
			bs.crepo.mut.RUnlock()
		}
	}

	// True only when every requested level was actually found.
	return ex&bs.ex.f == ex
}
+
// syncLocal ensures the local data we have about the source is fully up to date
// with what's out there over the network.
//
// It runs at most once per source instance; subsequent calls return the
// stored result of the first run.
func (bs *baseVCSSource) syncLocal() error {
	// Ensure we only have one goroutine doing this at a time
	bs.synclock.Lock()
	defer bs.synclock.Unlock()

	// ...and that we only ever do it once
	if bs.allsync {
		// Return the stored err, if any
		return bs.syncerr
	}

	// One-way gate: even a failed sync is never retried on this instance.
	bs.allsync = true
	// First, ensure the local instance exists
	bs.syncerr = bs.ensureCacheExistence()
	if bs.syncerr != nil {
		return bs.syncerr
	}

	// Listing versions refreshes the version portion of the metadata cache.
	_, bs.syncerr = bs.lvfunc()
	if bs.syncerr != nil {
		return bs.syncerr
	}

	// This case is really just for git repos, where the lvfunc doesn't
	// guarantee that the local repo is synced
	if !bs.crepo.synced {
		bs.syncerr = bs.crepo.r.Update()
		if bs.syncerr != nil {
			return bs.syncerr
		}
		bs.crepo.synced = true
	}

	return nil
}
+
+func (bs *baseVCSSource) listPackages(pr ProjectRoot, v Version) (ptree PackageTree, err error) {
+	if err = bs.ensureCacheExistence(); err != nil {
+		return
+	}
+
+	var r Revision
+	if r, err = bs.toRevOrErr(v); err != nil {
+		return
+	}
+
+	// Return the ptree from the cache, if we already have it
+	var exists bool
+	if ptree, exists = bs.dc.ptrees[r]; exists {
+		return
+	}
+
+	// Not in the cache; check out the version and do the analysis
+	bs.crepo.mut.Lock()
+	// Check out the desired version for analysis
+	if r != "" {
+		// Always prefer a rev, if it's available
+		err = bs.crepo.r.UpdateVersion(string(r))
+	} else {
+		// If we don't have a rev, ensure the repo is up to date, otherwise we
+		// could have a desync issue
+		if !bs.crepo.synced {
+			err = bs.crepo.r.Update()
+			if err != nil {
+				return PackageTree{}, fmt.Errorf("could not fetch latest updates into repository: %s", err)
+			}
+			bs.crepo.synced = true
+		}
+		err = bs.crepo.r.UpdateVersion(v.String())
+	}
+
+	ptree, err = ListPackages(bs.crepo.r.LocalPath(), string(pr))
+	bs.crepo.mut.Unlock()
+
+	// TODO(sdboyer) cache errs?
+	if err != nil {
+		bs.dc.ptrees[r] = ptree
+	}
+
+	return
+}
+
// toRevOrErr makes all efforts to convert a Version into a rev, including
// updating the cache repo (if needed). It does not guarantee that the returned
// Revision actually exists in the repository (as one of the cheaper methods may
// have had bad data).
func (bs *baseVCSSource) toRevOrErr(v Version) (r Revision, err error) {
	r = bs.dc.toRevision(v)
	if r == "" {
		// Rev can be empty if:
		//  - The cache is unsynced
		//  - A version was passed that used to exist, but no longer does
		//  - A garbage version was passed. (Functionally indistinguishable from
		//  the previous)
		if !bs.cvsync {
			// call the lvfunc to sync the meta cache
			_, err = bs.lvfunc()
			if err != nil {
				return
			}
		}

		// Try the freshly-synced cache once more.
		r = bs.dc.toRevision(v)
		// If we still don't have a rev, then the version's no good
		if r == "" {
			err = fmt.Errorf("version %s does not exist in source %s", v, bs.crepo.r.Remote())
		}
	}

	return
}
+
// exportVersionTo writes the tree of the source at the given version into the
// directory at to, delegating to the cache repository.
func (bs *baseVCSSource) exportVersionTo(v Version, to string) error {
	return bs.crepo.exportVersionTo(v, to)
}
diff --git a/vendor/github.com/sdboyer/gps/source_manager.go b/vendor/github.com/sdboyer/gps/source_manager.go
new file mode 100644
index 0000000..f59ae62
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/source_manager.go
@@ -0,0 +1,432 @@
+package gps
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"github.com/Masterminds/semver"
+)
+
// Used to compute a friendly filepath from a URL-shaped input. Replaces the
// characters ':', '/', and '+' with '-'.
//
// TODO(sdboyer) this is awful. Right?
var sanitizer = strings.NewReplacer(":", "-", "/", "-", "+", "-")
+
// A SourceManager is responsible for retrieving, managing, and interrogating
// source repositories. Its primary purpose is to serve the needs of a Solver,
// but it is handy for other purposes, as well.
//
// gps's built-in SourceManager, SourceMgr, is intended to be generic and
// sufficient for any purpose. It provides some additional semantics around the
// methods defined here.
type SourceManager interface {
	// SourceExists checks if a repository exists, either upstream or in the
	// SourceManager's central repository cache.
	SourceExists(ProjectIdentifier) (bool, error)

	// SyncSourceFor will attempt to bring all local information about a source
	// fully up to date.
	SyncSourceFor(ProjectIdentifier) error

	// ListVersions retrieves a list of the available versions for a given
	// repository name.
	ListVersions(ProjectIdentifier) ([]Version, error)

	// RevisionPresentIn indicates whether the provided Version is present in
	// the given repository.
	RevisionPresentIn(ProjectIdentifier, Revision) (bool, error)

	// ListPackages parses the tree of the Go packages at or below root of the
	// provided ProjectIdentifier, at the provided version.
	ListPackages(ProjectIdentifier, Version) (PackageTree, error)

	// GetManifestAndLock returns manifest and lock information for the provided
	// root import path.
	//
	// gps currently requires that projects be rooted at their repository root,
	// necessitating that the ProjectIdentifier's ProjectRoot must also be a
	// repository root.
	GetManifestAndLock(ProjectIdentifier, Version) (Manifest, Lock, error)

	// ExportProject writes out the tree of the provided import path, at the
	// provided version, to the provided directory.
	ExportProject(ProjectIdentifier, Version, string) error

	// AnalyzerInfo reports the name and version of the logic used to service
	// GetManifestAndLock().
	AnalyzerInfo() (name string, version *semver.Version)

	// DeduceProjectRoot takes an import path and deduces the corresponding
	// project/source root.
	DeduceProjectRoot(ip string) (ProjectRoot, error)
}
+
// A ProjectAnalyzer is responsible for analyzing a given path for Manifest and
// Lock information. Tools relying on gps must implement one.
type ProjectAnalyzer interface {
	// DeriveManifestAndLock performs analysis of the filesystem tree rooted at
	// path, with the root import path importRoot, to determine the project's
	// constraints, as indicated by a Manifest and Lock.
	DeriveManifestAndLock(path string, importRoot ProjectRoot) (Manifest, Lock, error)

	// Info reports the name and version of this ProjectAnalyzer.
	Info() (name string, version *semver.Version)
}
+
// SourceMgr is the default SourceManager for gps.
//
// There's no (planned) reason why it would need to be reimplemented by other
// tools; control via dependency injection is intended to be sufficient.
type SourceMgr struct {
	cachedir string            // root of the on-disk cache directory
	lf       *os.File          // handle for the held sm.lock file
	srcs     map[string]source // sources, keyed by net name
	srcmut   sync.RWMutex      // guards srcs
	an       ProjectAnalyzer   // analyzer injected at construction
	dxt      deducerTrie       // trie of import-path deducers
	rootxt   prTrie            // memoized import path -> project root mappings
}

// Compile-time check that SourceMgr satisfies the SourceManager interface.
var _ SourceManager = &SourceMgr{}
+
+// NewSourceManager produces an instance of gps's built-in SourceManager. It
+// takes a cache directory (where local instances of upstream repositories are
+// stored), and a ProjectAnalyzer that is used to extract manifest and lock
+// information from source trees.
+//
+// The returned SourceManager aggressively caches information wherever possible.
+// If tools need to do preliminary work involving upstream repository analysis
+// prior to invoking a solve run, it is recommended that they create this
+// SourceManager as early as possible and use it to their ends. That way, the
+// solver can benefit from any caches that may have already been warmed.
+//
+// gps's SourceManager is intended to be threadsafe (if it's not, please file a
+// bug!). It should be safe to reuse across concurrent solving runs, even on
+// unrelated projects.
+func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) {
+	if an == nil {
+		return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager")
+	}
+
+	err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777)
+	if err != nil {
+		return nil, err
+	}
+
+	glpath := filepath.Join(cachedir, "sm.lock")
+	_, err = os.Stat(glpath)
+	if err == nil {
+		return nil, CouldNotCreateLockError{
+			Path: glpath,
+			Err:  fmt.Errorf("cache lock file %s exists - another process crashed or is still running?", glpath),
+		}
+	}
+
+	fi, err := os.OpenFile(glpath, os.O_CREATE|os.O_EXCL, 0600) // is 0600 sane for this purpose?
+	if err != nil {
+		return nil, CouldNotCreateLockError{
+			Path: glpath,
+			Err:  fmt.Errorf("err on attempting to create global cache lock: %s", err),
+		}
+	}
+
+	return &SourceMgr{
+		cachedir: cachedir,
+		lf:       fi,
+		srcs:     make(map[string]source),
+		an:       an,
+		dxt:      pathDeducerTrie(),
+		rootxt:   newProjectRootTrie(),
+	}, nil
+}
+
// CouldNotCreateLockError is returned by NewSourceManager when the global
// cache lock file (sm.lock) already exists or could not be created.
type CouldNotCreateLockError struct {
	Path string // path of the lock file involved
	Err  error  // underlying cause
}

// Error returns the message of the wrapped error.
func (e CouldNotCreateLockError) Error() string {
	return e.Err.Error()
}
+
// Release lets go of any locks held by the SourceManager.
//
// It closes the lock file handle and removes the on-disk sm.lock so that a
// future process can construct its own SourceMgr against this cache dir.
func (sm *SourceMgr) Release() {
	sm.lf.Close()
	os.Remove(filepath.Join(sm.cachedir, "sm.lock"))
}
+
// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer.
// It is a direct pass-through to the analyzer's Info() method.
func (sm *SourceMgr) AnalyzerInfo() (name string, version *semver.Version) {
	return sm.an.Info()
}
+
+// GetManifestAndLock returns manifest and lock information for the provided
+// import path. gps currently requires that projects be rooted at their
+// repository root, necessitating that the ProjectIdentifier's ProjectRoot must
+// also be a repository root.
+//
+// The work of producing the manifest and lock is delegated to the injected
+// ProjectAnalyzer's DeriveManifestAndLock() method.
+func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) {
+	src, err := sm.getSourceFor(id)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return src.getManifestAndLock(id.ProjectRoot, v)
+}
+
+// ListPackages parses the tree of the Go packages at and below the ProjectRoot
+// of the given ProjectIdentifier, at the given version.
+func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (PackageTree, error) {
+	src, err := sm.getSourceFor(id)
+	if err != nil {
+		return PackageTree{}, err
+	}
+
+	return src.listPackages(id.ProjectRoot, v)
+}
+
+// ListVersions retrieves a list of the available versions for a given
+// repository name.
+//
+// The list is not sorted; while it may be returned in the order that the
+// underlying VCS reports version information, no guarantee is made. It is
+// expected that the caller either not care about order, or sort the result
+// themselves.
+//
+// This list is always retrieved from upstream on the first call. Subsequent
+// calls will return a cached version of the first call's results. if upstream
+// is not accessible (network outage, access issues, or the resource actually
+// went away), an error will be returned.
+func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) {
+	src, err := sm.getSourceFor(id)
+	if err != nil {
+		// TODO(sdboyer) More-er proper-er errors
+		return nil, err
+	}
+
+	return src.listVersions()
+}
+
+// RevisionPresentIn indicates whether the provided Revision is present in the given
+// repository.
+func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) {
+	src, err := sm.getSourceFor(id)
+	if err != nil {
+		// TODO(sdboyer) More-er proper-er errors
+		return false, err
+	}
+
+	return src.revisionPresentIn(r)
+}
+
+// SourceExists checks if a repository exists, either upstream or in the cache,
+// for the provided ProjectIdentifier.
+func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) {
+	src, err := sm.getSourceFor(id)
+	if err != nil {
+		return false, err
+	}
+
+	return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil
+}
+
+// SyncSourceFor will ensure that all local caches and information about a
+// source are up to date with any network-acccesible information.
+//
+// The primary use case for this is prefetching.
+func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error {
+	src, err := sm.getSourceFor(id)
+	if err != nil {
+		return err
+	}
+
+	return src.syncLocal()
+}
+
+// ExportProject writes out the tree of the provided ProjectIdentifier's
+// ProjectRoot, at the provided version, to the provided directory.
+func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) error {
+	src, err := sm.getSourceFor(id)
+	if err != nil {
+		return err
+	}
+
+	return src.exportVersionTo(v, to)
+}
+
// DeduceProjectRoot takes an import path and deduces the corresponding
// project/source root.
//
// Note that some import paths may require network activity to correctly
// determine the root of the path, such as, but not limited to, vanity import
// paths. (A special exception is written for gopkg.in to minimize network
// activity, as its behavior is well-structured)
func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, error) {
	// Fast path: a previously-deduced prefix of this path is memoized in the
	// trie.
	if prefix, root, has := sm.rootxt.LongestPrefix(ip); has {
		// The non-matching tail of the import path could still be malformed.
		// Validate just that part, if it exists
		if prefix != ip {
			// TODO(sdboyer) commented until i find a proper description of how
			// to validate an import path
			//if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) {
			//return "", fmt.Errorf("%q is not a valid import path", ip)
			//}
			// There was one, and it validated fine - add it so we don't have to
			// revalidate it later
			sm.rootxt.Insert(ip, root)
		}
		return root, nil
	}

	// Slow path: run deduction, which may involve network activity.
	rootf, _, err := sm.deducePathAndProcess(ip)
	if err != nil {
		return "", err
	}

	r, err := rootf()
	return ProjectRoot(r), err
}
+
// getSourceFor returns the source for the given identifier, lazily creating
// and memoizing it (keyed by network name) on first use.
func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) {
	nn := id.netName()

	// Fast path: the source already exists in the map.
	sm.srcmut.RLock()
	src, has := sm.srcs[nn]
	sm.srcmut.RUnlock()
	if has {
		return src, nil
	}

	_, srcf, err := sm.deducePathAndProcess(nn)
	if err != nil {
		return nil, err
	}

	// we don't care about the ident here, and the future produced by
	// deducePathAndProcess will dedupe with what's in the sm.srcs map
	src, _, err = srcf()
	return src, err
}
+
+func (sm *SourceMgr) deducePathAndProcess(path string) (stringFuture, sourceFuture, error) {
+	df, err := sm.deduceFromPath(path)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var rstart, sstart int32
+	rc, sc := make(chan struct{}, 1), make(chan struct{}, 1)
+
+	// Rewrap in a deferred future, so the caller can decide when to trigger it
+	rootf := func() (pr string, err error) {
+		// CAS because a bad interleaving here would panic on double-closing rc
+		if atomic.CompareAndSwapInt32(&rstart, 0, 1) {
+			go func() {
+				defer close(rc)
+				pr, err = df.root()
+				if err != nil {
+					// Don't cache errs. This doesn't really hurt the solver, and is
+					// beneficial for other use cases because it means we don't have to
+					// expose any kind of controls for clearing caches.
+					return
+				}
+
+				tpr := ProjectRoot(pr)
+				sm.rootxt.Insert(pr, tpr)
+				// It's not harmful if the netname was a URL rather than an
+				// import path
+				if pr != path {
+					// Insert the result into the rootxt twice - once at the
+					// root itself, so as to catch siblings/relatives, and again
+					// at the exact provided import path (assuming they were
+					// different), so that on subsequent calls, exact matches
+					// can skip the regex above.
+					sm.rootxt.Insert(path, tpr)
+				}
+			}()
+		}
+
+		<-rc
+		return pr, err
+	}
+
+	// Now, handle the source
+	fut := df.psf(sm.cachedir, sm.an)
+
+	// Rewrap in a deferred future, so the caller can decide when to trigger it
+	srcf := func() (src source, ident string, err error) {
+		// CAS because a bad interleaving here would panic on double-closing sc
+		if atomic.CompareAndSwapInt32(&sstart, 0, 1) {
+			go func() {
+				defer close(sc)
+				src, ident, err = fut()
+				if err != nil {
+					// Don't cache errs. This doesn't really hurt the solver, and is
+					// beneficial for other use cases because it means we don't have
+					// to expose any kind of controls for clearing caches.
+					return
+				}
+
+				sm.srcmut.Lock()
+				defer sm.srcmut.Unlock()
+
+				// Check to make sure a source hasn't shown up in the meantime, or that
+				// there wasn't already one at the ident.
+				var hasi, hasp bool
+				var srci, srcp source
+				if ident != "" {
+					srci, hasi = sm.srcs[ident]
+				}
+				srcp, hasp = sm.srcs[path]
+
+				// if neither the ident nor the input path have an entry for this src,
+				// we're in the simple case - write them both in and we're done
+				if !hasi && !hasp {
+					sm.srcs[path] = src
+					if ident != path && ident != "" {
+						sm.srcs[ident] = src
+					}
+					return
+				}
+
+				// Now, the xors.
+				//
+				// If already present for ident but not for path, copy ident's src
+				// to path. This covers cases like a gopkg.in path referring back
+				// onto a github repository, where something else already explicitly
+				// looked up that same gh repo.
+				if hasi && !hasp {
+					sm.srcs[path] = srci
+					src = srci
+				}
+				// If already present for path but not for ident, do NOT copy path's
+				// src to ident, but use the returned one instead. Really, this case
+				// shouldn't occur at all...? But the crucial thing is that the
+				// path-based one has already discovered what actual ident of source
+				// they want to use, and changing that arbitrarily would have
+				// undefined effects.
+				if hasp && !hasi && ident != "" {
+					sm.srcs[ident] = src
+				}
+
+				// If both are present, then assume we're good, and use the path one
+				if hasp && hasi {
+					// TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the
+					// same object, panic
+					src = srcp
+				}
+			}()
+		}
+
+		<-sc
+		return
+	}
+
+	return rootf, srcf, nil
+}
diff --git a/vendor/github.com/sdboyer/gps/source_test.go b/vendor/github.com/sdboyer/gps/source_test.go
new file mode 100644
index 0000000..787e573
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/source_test.go
@@ -0,0 +1,339 @@
+package gps
+
+import (
+	"io/ioutil"
+	"net/url"
+	"reflect"
+	"testing"
+)
+
// TestGitSourceInteractions exercises gitSource against a live GitHub test
// repository: version listing, revision presence, and existence-bit
// bookkeeping. Requires network access; skipped under -short.
func TestGitSourceInteractions(t *testing.T) {
	// This test is slowish, skip it on -short
	if testing.Short() {
		t.Skip("Skipping git source version fetching test in short mode")
	}

	cpath, err := ioutil.TempDir("", "smcache")
	if err != nil {
		t.Errorf("Failed to create temp dir: %s", err)
	}
	// rf cleans up the temp cache dir; called manually before each FailNow.
	rf := func() {
		err := removeAll(cpath)
		if err != nil {
			t.Errorf("removeAll failed: %s", err)
		}
	}

	n := "github.com/Masterminds/VCSTestRepo"
	un := "https://" + n
	u, err := url.Parse(un)
	if err != nil {
		t.Errorf("URL was bad, lolwut? errtext: %s", err)
		rf()
		t.FailNow()
	}
	mb := maybeGitSource{
		url: u,
	}

	isrc, ident, err := mb.try(cpath, naiveAnalyzer{})
	if err != nil {
		t.Errorf("Unexpected error while setting up gitSource for test repo: %s", err)
		rf()
		t.FailNow()
	}
	src, ok := isrc.(*gitSource)
	if !ok {
		t.Errorf("Expected a gitSource, got a %T", isrc)
		rf()
		t.FailNow()
	}
	if ident != un {
		t.Errorf("Expected %s as source ident, got %s", un, ident)
	}

	vlist, err := src.listVersions()
	if err != nil {
		t.Errorf("Unexpected error getting version pairs from git repo: %s", err)
		rf()
		t.FailNow()
	}

	// Listing versions for git should mark upstream as checked and found,
	// and leave the cache bits untouched.
	if src.ex.s&existsUpstream != existsUpstream {
		t.Errorf("gitSource.listVersions() should have set the upstream existence bit for search")
	}
	if src.ex.f&existsUpstream != existsUpstream {
		t.Errorf("gitSource.listVersions() should have set the upstream existence bit for found")
	}
	if src.ex.s&existsInCache != 0 {
		t.Errorf("gitSource.listVersions() should not have set the cache existence bit for search")
	}
	if src.ex.f&existsInCache != 0 {
		t.Errorf("gitSource.listVersions() should not have set the cache existence bit for found")
	}

	// check that an expected rev is present
	is, err := src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
	if err != nil {
		t.Errorf("Unexpected error while checking revision presence: %s", err)
	} else if !is {
		t.Errorf("Revision that should exist was not present")
	}

	if len(vlist) != 3 {
		t.Errorf("git test repo should've produced three versions, got %v: vlist was %s", len(vlist), vlist)
	} else {
		SortForUpgrade(vlist)
		evl := []Version{
			NewVersion("1.0.0").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
			newDefaultBranch("master").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
			NewBranch("test").Is(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")),
		}
		if !reflect.DeepEqual(vlist, evl) {
			t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
		}
	}

	// recheck that rev is present, this time interacting with cache differently
	is, err = src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e"))
	if err != nil {
		t.Errorf("Unexpected error while re-checking revision presence: %s", err)
	} else if !is {
		t.Errorf("Revision that should exist was not present on re-check")
	}
	// NOTE(review): rf() is never called on the success path, so the temp
	// cache dir leaks when the test passes - confirm whether intended.
}
+
+func TestBzrSourceInteractions(t *testing.T) {
+	// This test is quite slow (ugh bzr), so skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping bzr source version fetching test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	rf := func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+
+	n := "launchpad.net/govcstestbzrrepo"
+	un := "https://" + n
+	u, err := url.Parse(un)
+	if err != nil {
+		t.Errorf("URL was bad, lolwut? errtext: %s", err)
+		rf()
+		t.FailNow()
+	}
+	mb := maybeBzrSource{
+		url: u,
+	}
+
+	isrc, ident, err := mb.try(cpath, naiveAnalyzer{})
+	if err != nil {
+		t.Errorf("Unexpected error while setting up bzrSource for test repo: %s", err)
+		rf()
+		t.FailNow()
+	}
+	src, ok := isrc.(*bzrSource)
+	if !ok {
+		t.Errorf("Expected a bzrSource, got a %T", isrc)
+		rf()
+		t.FailNow()
+	}
+	if ident != un {
+		t.Errorf("Expected %s as source ident, got %s", un, ident)
+	}
+	evl := []Version{
+		NewVersion("1.0.0").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")),
+		newDefaultBranch("(default)").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")),
+	}
+
+	// check that an expected rev is present
+	is, err := src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
+	if err != nil {
+		t.Errorf("Unexpected error while checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present")
+	}
+
+	vlist, err := src.listVersions()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err)
+	}
+
+	if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for search")
+	}
+	if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for found")
+	}
+
+	if len(vlist) != 2 {
+		t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist))
+	} else {
+		SortForUpgrade(vlist)
+		if !reflect.DeepEqual(vlist, evl) {
+			t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+		}
+	}
+
+	// Run again, this time to ensure cache outputs correctly
+	vlist, err = src.listVersions()
+	if err != nil {
+		t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err)
+	}
+
+	if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for search")
+	}
+	if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache {
+		t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for found")
+	}
+
+	if len(vlist) != 2 {
+		t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist))
+	} else {
+		SortForUpgrade(vlist)
+		if !reflect.DeepEqual(vlist, evl) {
+			t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+		}
+	}
+
+	// recheck that rev is present, this time interacting with cache differently
+	is, err = src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68"))
+	if err != nil {
+		t.Errorf("Unexpected error while re-checking revision presence: %s", err)
+	} else if !is {
+		t.Errorf("Revision that should exist was not present on re-check")
+	}
+}
+
+func TestHgSourceInteractions(t *testing.T) {
+	// This test is slow, so skip it on -short
+	if testing.Short() {
+		t.Skip("Skipping hg source version fetching test in short mode")
+	}
+
+	cpath, err := ioutil.TempDir("", "smcache")
+	if err != nil {
+		t.Errorf("Failed to create temp dir: %s", err)
+	}
+	rf := func() {
+		err := removeAll(cpath)
+		if err != nil {
+			t.Errorf("removeAll failed: %s", err)
+		}
+	}
+
+	tfunc := func(n string, evl []Version) {
+		un := "https://" + n
+		u, err := url.Parse(un)
+		if err != nil {
+			t.Errorf("URL was bad, lolwut? errtext: %s", err)
+			return
+		}
+		mb := maybeHgSource{
+			url: u,
+		}
+
+		isrc, ident, err := mb.try(cpath, naiveAnalyzer{})
+		if err != nil {
+			t.Errorf("Unexpected error while setting up hgSource for test repo: %s", err)
+			return
+		}
+		src, ok := isrc.(*hgSource)
+		if !ok {
+			t.Errorf("Expected a hgSource, got a %T", isrc)
+			return
+		}
+		if ident != un {
+			t.Errorf("Expected %s as source ident, got %s", un, ident)
+		}
+
+		// check that an expected rev is present
+		is, err := src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9"))
+		if err != nil {
+			t.Errorf("Unexpected error while checking revision presence: %s", err)
+		} else if !is {
+			t.Errorf("Revision that should exist was not present")
+		}
+
+		vlist, err := src.listVersions()
+		if err != nil {
+			t.Errorf("Unexpected error getting version pairs from hg repo: %s", err)
+		}
+
+		if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for search")
+		}
+		if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for found")
+		}
+
+		if len(vlist) != len(evl) {
+			t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist))
+		} else {
+			SortForUpgrade(vlist)
+			if !reflect.DeepEqual(vlist, evl) {
+				t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+			}
+		}
+
+		// Run again, this time to ensure cache outputs correctly
+		vlist, err = src.listVersions()
+		if err != nil {
+			t.Errorf("Unexpected error getting version pairs from hg repo: %s", err)
+		}
+
+		if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for search")
+		}
+		if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache {
+			t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for found")
+		}
+
+		if len(vlist) != len(evl) {
+			t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist))
+		} else {
+			SortForUpgrade(vlist)
+			if !reflect.DeepEqual(vlist, evl) {
+				t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl)
+			}
+		}
+
+		// recheck that rev is present, this time interacting with cache differently
+		is, err = src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9"))
+		if err != nil {
+			t.Errorf("Unexpected error while re-checking revision presence: %s", err)
+		} else if !is {
+			t.Errorf("Revision that should exist was not present on re-check")
+		}
+	}
+
+	// simultaneously run for both the repo with and without the magic bookmark
+	donech := make(chan struct{})
+	go func() {
+		tfunc("bitbucket.org/sdboyer/withbm", []Version{
+			NewVersion("v1.0.0").Is(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")),
+			newDefaultBranch("@").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")),
+			NewBranch("another").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")),
+			NewBranch("default").Is(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")),
+			NewBranch("newbranch").Is(Revision("5e2a01be9aee942098e44590ae545c7143da9675")),
+		})
+		close(donech)
+	}()
+
+	tfunc("bitbucket.org/sdboyer/nobm", []Version{
+		NewVersion("v1.0.0").Is(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")),
+		newDefaultBranch("default").Is(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")),
+		NewBranch("another").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")),
+		NewBranch("newbranch").Is(Revision("5e2a01be9aee942098e44590ae545c7143da9675")),
+	})
+
+	<-donech
+	rf()
+}
diff --git a/vendor/github.com/sdboyer/gps/trace.go b/vendor/github.com/sdboyer/gps/trace.go
new file mode 100644
index 0000000..e08dcf7
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/trace.go
@@ -0,0 +1,183 @@
+package gps
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
// Characters (and space-suffixed variants) used to decorate solver trace
// output lines.
const (
	successChar   = "✓"
	successCharSp = successChar + " "
	failChar      = "✗"
	failCharSp    = failChar + " "
	backChar      = "←"
)
+
// traceCheckPkgs logs that the solver is revisiting an already-selected
// project to add more packages, indented one level past the version queue
// depth. No-op unless tracing is enabled.
func (s *solver) traceCheckPkgs(bmi bimodalIdentifier) {
	if !s.params.Trace {
		return
	}

	prefix := strings.Repeat("| ", len(s.vqs)+1)
	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("? revisit %s to add %v pkgs", bmi.id.errString(), len(bmi.pl)), prefix, prefix))
}
+
// traceCheckQueue logs an attempt (or, when cont is true, a continuation) at
// working through a project's version queue, reporting how many versions
// remain to try; offset adjusts the indent relative to the version queue
// depth. No-op unless tracing is enabled.
func (s *solver) traceCheckQueue(q *versionQueue, bmi bimodalIdentifier, cont bool, offset int) {
	if !s.params.Trace {
		return
	}

	prefix := strings.Repeat("| ", len(s.vqs)+offset)
	vlen := strconv.Itoa(len(q.pi))
	// If the queue hasn't fully loaded, the count is only a lower bound.
	if !q.allLoaded {
		vlen = "at least " + vlen
	}

	// TODO(sdboyer) how...to list the packages in the limited space we have?
	var verb string
	if cont {
		verb = "continue"
		vlen = vlen + " more"
	} else {
		verb = "attempt"
	}

	s.tl.Printf("%s\n", tracePrefix(fmt.Sprintf("? %s %s with %v pkgs; %s versions to try", verb, bmi.id.errString(), len(bmi.pl), vlen), prefix, prefix))
}
+
// traceStartBacktrack is called with the bmi that first failed, thus initiating
// backtracking. pkgonly distinguishes a failure to add packages from running
// out of versions entirely. No-op unless tracing is enabled.
func (s *solver) traceStartBacktrack(bmi bimodalIdentifier, err error, pkgonly bool) {
	if !s.params.Trace {
		return
	}

	var msg string
	if pkgonly {
		msg = fmt.Sprintf("%s could not add %v pkgs to %s; begin backtrack", backChar, len(bmi.pl), bmi.id.errString())
	} else {
		msg = fmt.Sprintf("%s no more versions of %s to try; begin backtrack", backChar, bmi.id.errString())
	}

	// Indent by the number of currently-selected projects.
	prefix := strings.Repeat("| ", len(s.sel.projects))
	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
}
+
// traceBacktrack is called when a package or project is popped off during
// backtracking. No-op unless tracing is enabled.
func (s *solver) traceBacktrack(bmi bimodalIdentifier, pkgonly bool) {
	if !s.params.Trace {
		return
	}

	var msg string
	if pkgonly {
		msg = fmt.Sprintf("%s backtrack: popped %v pkgs from %s", backChar, len(bmi.pl), bmi.id.errString())
	} else {
		msg = fmt.Sprintf("%s backtrack: no more versions of %s to try", backChar, bmi.id.errString())
	}

	prefix := strings.Repeat("| ", len(s.sel.projects))
	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
}
+
// traceFinish is called just once after solving has finished, whether it
// succeeded or not, and summarizes the outcome. No-op unless tracing is
// enabled.
func (s *solver) traceFinish(sol solution, err error) {
	if !s.params.Trace {
		return
	}

	if err == nil {
		// Sum the packages across all locked projects for the summary line.
		var pkgcount int
		for _, lp := range sol.Projects() {
			pkgcount += len(lp.pkgs)
		}
		s.tl.Printf("%s found solution with %v packages from %v projects", successChar, pkgcount, len(sol.Projects()))
	} else {
		s.tl.Printf("%s solving failed", failChar)
	}
}
+
// traceSelectRoot is called just once, when the root project is selected, and
// reports package/dependency counts for the root. No-op unless tracing is
// enabled.
func (s *solver) traceSelectRoot(ptree PackageTree, cdeps []completeDep) {
	if !s.params.Trace {
		return
	}

	// This duplicates work a bit, but we're in trace mode and it's only once,
	// so who cares
	rm := ptree.ExternalReach(true, true, s.ig)

	s.tl.Printf("Root project is %q", s.rpt.ImportRoot)

	// Total count of external packages across all complete deps.
	var expkgs int
	for _, cdep := range cdeps {
		expkgs += len(cdep.pl)
	}

	// TODO(sdboyer) include info on ignored pkgs/imports, etc.
	s.tl.Printf(" %v transitively valid internal packages", len(rm))
	s.tl.Printf(" %v external packages imported from %v projects", expkgs, len(cdeps))
	s.tl.Printf(successCharSp + "select (root)")
}
+
// traceSelect is called when an atom is successfully selected. pkgonly
// indicates that only additional packages (not a new project version) were
// added. No-op unless tracing is enabled.
func (s *solver) traceSelect(awp atomWithPackages, pkgonly bool) {
	if !s.params.Trace {
		return
	}

	var msg string
	if pkgonly {
		msg = fmt.Sprintf("%s include %v more pkgs from %s", successChar, len(awp.pl), a2vs(awp.a))
	} else {
		msg = fmt.Sprintf("%s select %s w/%v pkgs", successChar, a2vs(awp.a), len(awp.pl))
	}

	// Indent one level shallower than the selection depth (cf. the
	// unadjusted len used in traceStartBacktrack/traceBacktrack).
	prefix := strings.Repeat("| ", len(s.sel.projects)-1)
	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
}
+
+func (s *solver) traceInfo(args ...interface{}) {
+	if !s.params.Trace {
+		return
+	}
+
+	if len(args) == 0 {
+		panic("must pass at least one param to traceInfo")
+	}
+
+	preflen := len(s.sel.projects)
+	var msg string
+	switch data := args[0].(type) {
+	case string:
+		msg = tracePrefix(fmt.Sprintf(data, args[1:]...), "| ", "| ")
+	case traceError:
+		preflen += 1
+		// We got a special traceError, use its custom method
+		msg = tracePrefix(data.traceString(), "| ", failCharSp)
+	case error:
+		// Regular error; still use the x leader but default Error() string
+		msg = tracePrefix(data.Error(), "| ", failCharSp)
+	default:
+		// panic here because this can *only* mean a stupid internal bug
+		panic(fmt.Sprintf("canary - unknown type passed as first param to traceInfo %T", data))
+	}
+
+	prefix := strings.Repeat("| ", preflen)
+	s.tl.Printf("%s\n", tracePrefix(msg, prefix, prefix))
+}
+
// tracePrefix prepends fsep to the first line of msg and sep to every
// subsequent line, dropping a single trailing newline first, and returns the
// rejoined result.
func tracePrefix(msg, sep, fsep string) string {
	lines := strings.Split(strings.TrimSuffix(msg, "\n"), "\n")
	out := make([]string, 0, len(lines))
	for i, line := range lines {
		if i == 0 {
			out = append(out, fsep+line)
		} else {
			out = append(out, sep+line)
		}
	}

	return strings.Join(out, "\n")
}
diff --git a/vendor/github.com/sdboyer/gps/typed_radix.go b/vendor/github.com/sdboyer/gps/typed_radix.go
new file mode 100644
index 0000000..76b2f68
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/typed_radix.go
@@ -0,0 +1,160 @@
+package gps
+
+import (
+	"strings"
+
+	"github.com/armon/go-radix"
+)
+
+// Typed implementations of radix trees. These are just simple wrappers that let
+// us avoid having to type assert anywhere else, cleaning up other code a bit.
+//
+// Some of the more annoying things to implement (like walks) aren't
+// implemented. They can be added if/when we actually need them.
+//
+// Oh generics, where art thou...
+
// deducerTrie is a typed wrapper around a radix tree holding pathDeducer
// values.
type deducerTrie struct {
	t *radix.Tree
}

// newDeducerTrie returns an empty deducerTrie backed by a fresh radix tree.
func newDeducerTrie() deducerTrie {
	return deducerTrie{
		t: radix.New(),
	}
}

// Delete is used to delete a key, returning the previous value and if it was deleted
func (t deducerTrie) Delete(s string) (pathDeducer, bool) {
	if d, had := t.t.Delete(s); had {
		return d.(pathDeducer), had
	}
	return nil, false
}

// Get is used to lookup a specific key, returning the value and if it was found
func (t deducerTrie) Get(s string) (pathDeducer, bool) {
	if d, has := t.t.Get(s); has {
		return d.(pathDeducer), has
	}
	return nil, false
}

// Insert is used to add a new entry or update an existing entry. Returns if updated.
func (t deducerTrie) Insert(s string, d pathDeducer) (pathDeducer, bool) {
	if d2, had := t.t.Insert(s, d); had {
		return d2.(pathDeducer), had
	}
	return nil, false
}

// Len is used to return the number of elements in the tree
func (t deducerTrie) Len() int {
	return t.t.Len()
}

// LongestPrefix is like Get, but instead of an exact match, it will return the
// longest prefix match.
func (t deducerTrie) LongestPrefix(s string) (string, pathDeducer, bool) {
	if p, d, has := t.t.LongestPrefix(s); has {
		return p, d.(pathDeducer), has
	}
	return "", nil, false
}

// ToMap is used to walk the tree and convert it to a map.
func (t deducerTrie) ToMap() map[string]pathDeducer {
	m := make(map[string]pathDeducer)
	t.t.Walk(func(s string, d interface{}) bool {
		m[s] = d.(pathDeducer)
		// Returning false continues the walk over all entries.
		return false
	})

	return m
}
+
// prTrie is a typed wrapper around a radix tree holding ProjectRoot values.
type prTrie struct {
	t *radix.Tree
}

// newProjectRootTrie returns an empty prTrie backed by a fresh radix tree.
func newProjectRootTrie() prTrie {
	return prTrie{
		t: radix.New(),
	}
}

// Delete is used to delete a key, returning the previous value and if it was deleted
func (t prTrie) Delete(s string) (ProjectRoot, bool) {
	if pr, had := t.t.Delete(s); had {
		return pr.(ProjectRoot), had
	}
	return "", false
}

// Get is used to lookup a specific key, returning the value and if it was found
func (t prTrie) Get(s string) (ProjectRoot, bool) {
	if pr, has := t.t.Get(s); has {
		return pr.(ProjectRoot), has
	}
	return "", false
}

// Insert is used to add a new entry or update an existing entry. Returns if updated.
func (t prTrie) Insert(s string, pr ProjectRoot) (ProjectRoot, bool) {
	if pr2, had := t.t.Insert(s, pr); had {
		return pr2.(ProjectRoot), had
	}
	return "", false
}

// Len is used to return the number of elements in the tree
func (t prTrie) Len() int {
	return t.t.Len()
}

// LongestPrefix is like Get, but instead of an exact match, it will return the
// longest prefix match, additionally requiring that the matched prefix be a
// path-wise prefix of the input (see isPathPrefixOrEqual).
// NOTE(review): if the longest radix match fails the path check, shorter
// valid prefixes are not retried - confirm that's the intended behavior.
func (t prTrie) LongestPrefix(s string) (string, ProjectRoot, bool) {
	if p, pr, has := t.t.LongestPrefix(s); has && isPathPrefixOrEqual(p, s) {
		return p, pr.(ProjectRoot), has
	}
	return "", "", false
}

// ToMap is used to walk the tree and convert it to a map.
func (t prTrie) ToMap() map[string]ProjectRoot {
	m := make(map[string]ProjectRoot)
	t.t.Walk(func(s string, pr interface{}) bool {
		m[s] = pr.(ProjectRoot)
		// Returning false continues the walk over all entries.
		return false
	})

	return m
}
+
// isPathPrefixOrEqual is an additional helper check to ensure that the literal
// string prefix returned from a radix tree prefix match is also a path tree
// match.
//
// The radix tree gets it mostly right, but we have to guard against
// possibilities like this:
//
// github.com/sdboyer/foo
// github.com/sdboyer/foobar/baz
//
// The latter would incorrectly be conflated with the former. As we know we're
// operating on strings that describe import paths, guard against this case by
// verifying that either the input is the same length as the match (in which
// case we know they're equal), or that the next character is a "/". (Import
// paths are defined to always use "/", not the OS-specific path separator.)
func isPathPrefixOrEqual(pre, path string) bool {
	prflen, pathlen := len(pre), len(path)
	if pathlen == prflen+1 {
		// A one-character tail can only be the separator itself ("foo" vs
		// "foo/"); a trailing slash with nothing after it is deliberately
		// not treated as a path prefix match.
		return false
	}

	// we assume something else (a trie) has done equality check up to the point
	// of the prefix, so we just check len
	return prflen == pathlen || strings.HasPrefix(path[prflen:], "/")
}
diff --git a/vendor/github.com/sdboyer/gps/typed_radix_test.go b/vendor/github.com/sdboyer/gps/typed_radix_test.go
new file mode 100644
index 0000000..8edf39b
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/typed_radix_test.go
@@ -0,0 +1,22 @@
+package gps
+
+import "testing"
+
+// basically a regression test
+func TestPathPrefixOrEqual(t *testing.T) {
+	if !isPathPrefixOrEqual("foo", "foo") {
+		t.Error("Same path should return true")
+	}
+
+	if isPathPrefixOrEqual("foo", "fooer") {
+		t.Error("foo is not a path-type prefix of fooer")
+	}
+
+	if !isPathPrefixOrEqual("foo", "foo/bar") {
+		t.Error("foo is a path prefix of foo/bar")
+	}
+
+	if isPathPrefixOrEqual("foo", "foo/") {
+		t.Error("special case - foo is not a path prefix of foo/")
+	}
+}
diff --git a/vendor/github.com/sdboyer/gps/types.go b/vendor/github.com/sdboyer/gps/types.go
new file mode 100644
index 0000000..33b57f9
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/types.go
@@ -0,0 +1,228 @@
+package gps
+
+import (
+	"fmt"
+	"math/rand"
+	"strconv"
+)
+
+// ProjectRoot is the topmost import path in a tree of other import paths - the
+// root of the tree. In gps' current design, ProjectRoots have to correspond to
+// a repository root (mostly), but their real purpose is to identify the root
+// import path of a "project", logically encompassing all child packages.
+//
+// Projects are a crucial unit of operation in gps. Constraints are declared by
+// a project's manifest, and apply to all packages in a ProjectRoot's tree.
+// Solving itself mostly proceeds on a project-by-project basis.
+//
+// Aliasing string types is usually a bit of an anti-pattern. gps does it here
+// as a means of clarifying API intent. This is important because Go's package
+// management domain has lots of different path-ish strings floating around:
+//
+//  actual directories:
+//	/home/sdboyer/go/src/github.com/sdboyer/gps/example
+//  URLs:
+//	https://github.com/sdboyer/gps
+//  import paths:
+//	github.com/sdboyer/gps/example
+//  portions of import paths that refer to a package:
+//	example
+//  portions that could not possibly refer to anything sane:
+//	github.com/sdboyer
+//  portions that correspond to a repository root:
+//	github.com/sdboyer/gps
+//
+// While not a panacea, defining ProjectRoot at least allows us to clearly
+// identify when one of these path-ish strings is *supposed* to have certain
+// semantics.
+type ProjectRoot string
+
+// A ProjectIdentifier is, more or less, the name of a dependency. It is related
+// to, but differs in two keys ways from, an import path.
+//
+// First, ProjectIdentifiers do not identify a single package. Rather, they
+// encompasses the whole tree of packages rooted at and including their
+// ProjectRoot. In gps' current design, this ProjectRoot must correspond to the
+// root of a repository, though this may change in the future.
+//
+// Second, ProjectIdentifiers can optionally carry a NetworkName, which
+// identifies where the underlying source code can be located on the network.
+// These can be either a full URL, including protocol, or plain import paths.
+// So, these are all valid data for NetworkName:
+//
+//  github.com/sdboyer/gps
+//  github.com/fork/gps
+//  git@github.com:sdboyer/gps
+//  https://github.com/sdboyer/gps
+//
+// With plain import paths, network addresses are derived purely through an
+// algorithm. By having an explicit network name, it becomes possible to, for
+// example, transparently substitute a fork for the original upstream source
+// repository.
+//
+// Note that gps makes no guarantees about the actual import paths contained in
+// a repository aligning with ImportRoot. If tools, or their users, specify an
+// alternate NetworkName that contains a repository with incompatible internal
+// import paths, gps will fail. (gps does no import rewriting.)
+//
+// Also note that if different projects' manifests report a different
+// NetworkName for a given ImportRoot, it is a solve failure. Everyone has to
+// agree on where a given import path should be sourced from.
+//
+// If NetworkName is not explicitly set, gps will derive the network address from
+// the ImportRoot using a similar algorithm to that of the official go tooling.
+type ProjectIdentifier struct {
+	// ProjectRoot is the root import path of the project's package tree.
+	ProjectRoot ProjectRoot
+	// NetworkName optionally names where the source lives on the network
+	// (URL or import-path form). When empty, it is treated as being derived
+	// from ProjectRoot; see netName and normalize.
+	NetworkName string
+}
+
+// less orders ProjectIdentifiers lexicographically: first by ProjectRoot,
+// then, for equal roots, by the derived network name.
+func (i ProjectIdentifier) less(j ProjectIdentifier) bool {
+	if i.ProjectRoot < j.ProjectRoot {
+		return true
+	}
+	if j.ProjectRoot < i.ProjectRoot {
+		return false
+	}
+
+	// Roots are equal; break the tie on the (possibly derived) network names.
+	return i.netName() < j.netName()
+}
+
+// eq reports whether the two identifiers are equal. Unlike equiv, this is a
+// symmetric relation: an empty NetworkName on either side is considered equal
+// to an explicit NetworkName that matches that side's own ProjectRoot, since
+// that is exactly what normalize would fill in.
+func (i ProjectIdentifier) eq(j ProjectIdentifier) bool {
+	if i.ProjectRoot != j.ProjectRoot {
+		return false
+	}
+	if i.NetworkName == j.NetworkName {
+		return true
+	}
+
+	// One side empty while the other explicitly names its own root is still
+	// equality - the empty name defaults to the root.
+	if (i.NetworkName == "" && j.NetworkName == string(j.ProjectRoot)) ||
+		(j.NetworkName == "" && i.NetworkName == string(i.ProjectRoot)) {
+		return true
+	}
+
+	return false
+}
+
+// equiv will check if the two identifiers are "equivalent," under special
+// rules.
+//
+// Given that the ProjectRoots are equal (==), equivalency occurs if:
+//
+// 1. The NetworkNames are equal (==), OR
+// 2. The LEFT (the receiver) NetworkName is non-empty, and the right
+// NetworkName is empty.
+//
+// *This is, very much intentionally, an asymmetric binary relation.* It's
+// specifically intended to facilitate the case where we allow for a
+// ProjectIdentifier with an explicit NetworkName to match one without.
+func (i ProjectIdentifier) equiv(j ProjectIdentifier) bool {
+	if i.ProjectRoot != j.ProjectRoot {
+		return false
+	}
+	if i.NetworkName == j.NetworkName {
+		return true
+	}
+
+	// Asymmetric case: explicit (receiver) matches unspecified (argument),
+	// but never the reverse.
+	if i.NetworkName != "" && j.NetworkName == "" {
+		return true
+	}
+
+	return false
+}
+
+// netName returns the effective network name: the explicit NetworkName if
+// set, else the ProjectRoot itself as the derived default.
+func (i ProjectIdentifier) netName() string {
+	if i.NetworkName == "" {
+		return string(i.ProjectRoot)
+	}
+	return i.NetworkName
+}
+
+// errString renders the identifier for error messages, mentioning the
+// network name only when it differs from the root.
+func (i ProjectIdentifier) errString() string {
+	if i.NetworkName == "" || i.NetworkName == string(i.ProjectRoot) {
+		return string(i.ProjectRoot)
+	}
+	return fmt.Sprintf("%s (from %s)", i.ProjectRoot, i.NetworkName)
+}
+
+// normalize returns a copy of the identifier with an empty NetworkName
+// replaced by the ProjectRoot-derived default. The receiver is not modified
+// (value receiver).
+func (i ProjectIdentifier) normalize() ProjectIdentifier {
+	if i.NetworkName == "" {
+		i.NetworkName = string(i.ProjectRoot)
+	}
+
+	return i
+}
+
+// ProjectProperties comprise the properties that can be attached to a
+// ProjectRoot.
+//
+// In general, these are declared in the context of a map of ProjectRoot to its
+// ProjectProperties; they make little sense without their corresponding
+// ProjectRoot.
+type ProjectProperties struct {
+	// NetworkName optionally overrides where the project's source is fetched
+	// from; empty means "derive from the ProjectRoot".
+	NetworkName string
+	// Constraint restricts which versions of the project are acceptable.
+	Constraint  Constraint
+}
+
+// Package represents a Go package. It contains a subset of the information
+// go/build.Package does.
+type Package struct {
+	// ImportPath is the package's import path; CommentPath is the canonical
+	// path from an import comment, if any.
+	ImportPath, CommentPath string
+	// Name is the package name (the identifier after "package").
+	Name                    string
+	// Imports and TestImports list the package's imports for normal and test
+	// files, respectively.
+	Imports                 []string
+	TestImports             []string
+}
+
+// bimodalIdentifiers are used to track work to be done in the unselected queue.
+type bimodalIdentifier struct {
+	id ProjectIdentifier
+	// List of packages required within/under the ProjectIdentifier
+	pl []string
+	// prefv is used to indicate a 'preferred' version. This is expected to be
+	// derived from a dep's lock data, or else is empty.
+	prefv Version
+	// Indicates that the bmi came from the root project originally
+	fromRoot bool
+}
+
+// atom couples a ProjectIdentifier with one concrete Version - a single
+// candidate "project at version" unit.
+type atom struct {
+	id ProjectIdentifier
+	v  Version
+}
+
+// With a random revision and no name, collisions are...unlikely
+var nilpa = atom{
+	v: Revision(strconv.FormatInt(rand.Int63(), 36)),
+}
+
+// atomWithPackages augments an atom with the list of packages (pl) selected
+// from within that atom's tree.
+type atomWithPackages struct {
+	a  atom
+	pl []string
+}
+
+// bmi converts an atomWithPackages into a bimodalIdentifier.
+//
+// This is mostly intended for (read-only) trace use, so the package list slice
+// is not copied. It is the callers responsibility to not modify the pl slice,
+// lest that backpropagate and cause inconsistencies.
+func (awp atomWithPackages) bmi() bimodalIdentifier {
+	// prefv and fromRoot are deliberately left at their zero values.
+	return bimodalIdentifier{
+		id: awp.a.id,
+		pl: awp.pl,
+	}
+}
+
+// completeDep (name hopefully to change) provides the whole picture of a
+// dependency - the root (repo and project, since currently we assume the two
+// are the same) name, a constraint, and the actual packages needed that are
+// under that root.
+type completeDep struct {
+	// The base workingConstraint
+	workingConstraint
+	// The specific packages required from the ProjectDep
+	pl []string
+}
+
+// dependency ties a depending atom to the completeDep it requires - one edge
+// in the dependency graph.
+type dependency struct {
+	depender atom
+	dep      completeDep
+}
diff --git a/vendor/github.com/sdboyer/gps/vcs_source.go b/vendor/github.com/sdboyer/gps/vcs_source.go
new file mode 100644
index 0000000..91089ca
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/vcs_source.go
@@ -0,0 +1,585 @@
+package gps
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"github.com/Masterminds/vcs"
+	"github.com/termie/go-shutil"
+)
+
+// vcsSource is the set of operations a VCS-backed source must support:
+// syncing and materializing a local cache repo, enumerating version/revision
+// pairs locally and upstream, revision existence checks, checkout, and
+// exporting a version's tree to a directory. It is implemented by the
+// per-VCS types below (gitSource, bzrSource, hgSource).
+type vcsSource interface {
+	syncLocal() error
+	ensureLocal() error
+	listLocalVersionPairs() ([]PairedVersion, sourceExistence, error)
+	listUpstreamVersionPairs() ([]PairedVersion, sourceExistence, error)
+	hasRevision(Revision) (bool, error)
+	checkout(Version) error
+	exportVersionTo(Version, string) error
+}
+
+// gitSource is a generic git repository implementation that should work with
+// all standard git remotes.
+type gitSource struct {
+	baseVCSSource
+}
+
+// exportVersionTo writes the tree of version v into the directory at to,
+// without disturbing the cache repo's working copy: the real git index is
+// temporarily swapped aside, v's tree is read into a scratch index, and
+// checkout-index materializes it under the target prefix. If the first
+// attempt fails and the cache repo is stale, it syncs and retries once.
+func (s *gitSource) exportVersionTo(v Version, to string) error {
+	// Get away without syncing local, if we can
+	r := s.crepo.r
+	// ...but local repo does have to at least exist
+	if err := s.ensureCacheExistence(); err != nil {
+		return err
+	}
+
+	do := func() error {
+		s.crepo.mut.Lock()
+		defer s.crepo.mut.Unlock()
+
+		// Back up original index
+		idx, bak := filepath.Join(r.LocalPath(), ".git", "index"), filepath.Join(r.LocalPath(), ".git", "origindex")
+		err := os.Rename(idx, bak)
+		if err != nil {
+			return err
+		}
+
+		// could have an err here...but it's hard to imagine how?
+		defer os.Rename(bak, idx)
+
+		// read-tree wants a rev; unwrap a paired version to its underlying
+		// revision rather than passing the surface version string.
+		vstr := v.String()
+		if rv, ok := v.(PairedVersion); ok {
+			vstr = rv.Underlying().String()
+		}
+
+		out, err := r.RunFromDir("git", "read-tree", vstr)
+		if err != nil {
+			return fmt.Errorf("%s: %s", out, err)
+		}
+
+		// Ensure we have exactly one trailing slash
+		to = strings.TrimSuffix(to, string(os.PathSeparator)) + string(os.PathSeparator)
+		// Checkout from our temporary index to the desired target location on
+		// disk; now it's git's job to make it fast.
+		//
+		// Sadly, this approach *does* also write out vendor dirs. There doesn't
+		// appear to be a way to make checkout-index respect sparse checkout
+		// rules (-a supercedes it). The alternative is using plain checkout,
+		// though we have a bunch of housekeeping to do to set up, then tear
+		// down, the sparse checkout controls, as well as restore the original
+		// index and HEAD.
+		out, err = r.RunFromDir("git", "checkout-index", "-a", "--prefix="+to)
+		if err != nil {
+			return fmt.Errorf("%s: %s", out, err)
+		}
+		return nil
+	}
+
+	err := do()
+	if err != nil && !s.crepo.synced {
+		// If there was an err, and the repo cache is stale, it might've been
+		// beacuse we were missing the rev/ref. Try syncing, then run the export
+		// op again.
+		err = s.syncLocal()
+		if err != nil {
+			return err
+		}
+		err = do()
+	}
+
+	return err
+}
+
+// listVersions enumerates all branches and tags, each paired with its
+// revision, for this git source. If the in-memory cache is already in sync
+// with upstream (s.cvsync), it is served directly; otherwise the list is
+// built from `git ls-remote`, falling back to a local update plus
+// `git show-ref` if ls-remote fails. On success the vMap/rMap caches are
+// rebuilt and cvsync is set.
+func (s *gitSource) listVersions() (vlist []Version, err error) {
+	if s.cvsync {
+		// Cache hit: re-pair every cached unpaired version with its revision.
+		vlist = make([]Version, len(s.dc.vMap))
+		k := 0
+		for v, r := range s.dc.vMap {
+			vlist[k] = v.Is(r)
+			k++
+		}
+
+		return
+	}
+
+	r := s.crepo.r
+	var out []byte
+	c := exec.Command("git", "ls-remote", r.Remote())
+	// Ensure no terminal prompting for PWs
+	c.Env = mergeEnvLists([]string{"GIT_TERMINAL_PROMPT=0"}, os.Environ())
+	out, err = c.CombinedOutput()
+
+	// NOTE(review): bytes.Split always yields at least one element, so the
+	// len(all) == 0 guards here and below can never fire; only err routes us
+	// into the fallback. Confirm whether the length checks are intentional.
+	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+	if err != nil || len(all) == 0 {
+		// TODO(sdboyer) remove this path? it really just complicates things, for
+		// probably not much benefit
+
+		// ls-remote failed, probably due to bad communication or a faulty
+		// upstream implementation. So fetch updates, then build the list
+		// locally
+		s.crepo.mut.Lock()
+		err = r.Update()
+		s.crepo.mut.Unlock()
+		if err != nil {
+			// Definitely have a problem, now - bail out
+			return
+		}
+
+		// Upstream and cache must exist for this to have worked, so add that to
+		// searched and found
+		s.ex.s |= existsUpstream | existsInCache
+		s.ex.f |= existsUpstream | existsInCache
+		// Also, local is definitely now synced
+		s.crepo.synced = true
+
+		s.crepo.mut.RLock()
+		out, err = r.RunFromDir("git", "show-ref", "--dereference")
+		s.crepo.mut.RUnlock()
+		if err != nil {
+			// TODO(sdboyer) More-er proper-er error
+			return
+		}
+
+		all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+		if len(all) == 0 {
+			return nil, fmt.Errorf("no versions available for %s (this is weird)", r.Remote())
+		}
+	}
+
+	// Local cache may not actually exist here, but upstream definitely does
+	s.ex.s |= existsUpstream
+	s.ex.f |= existsUpstream
+
+	// pull out the HEAD rev (it's always first) so we know what branches to
+	// mark as default. This is, perhaps, not the best way to glean this, but it
+	// was good enough for git itself until 1.8.5. Also, the alternative is
+	// sniffing data out of the pack protocol, which is a separate request, and
+	// also waaaay more than we want to do right now.
+	//
+	// The cost is that we could potentially have multiple branches marked as
+	// the default. If that does occur, a later check (again, emulating git
+	// <1.8.5 behavior) further narrows the failure mode by choosing master as
+	// the sole default branch if a) master exists and b) master is one of the
+	// branches marked as a default.
+	//
+	// This all reduces the failure mode to a very narrow range of
+	// circumstances. Nevertheless, if we do end up emitting multiple
+	// default branches, it is possible that a user could end up following a
+	// non-default branch, IF:
+	//
+	// * Multiple branches match the HEAD rev
+	// * None of them are master
+	// * The solver makes it into the branch list in the version queue
+	// * The user has provided no constraint, or DefaultBranch
+	// * A branch that is not actually the default, but happens to share the
+	// rev, is lexicographically earlier than the true default branch
+	//
+	// Then the user could end up with an erroneous non-default branch in their
+	// lock file.
+	headrev := Revision(all[0][:40])
+	var onedef, multidef, defmaster bool
+
+	smap := make(map[string]bool)
+	uniq := 0
+	vlist = make([]Version, len(all)-1) // less 1, because always ignore HEAD
+	for _, pair := range all {
+		var v PairedVersion
+		// Lines are "<40-char rev>\t<ref>"; byte 46 is just past "refs/", so
+		// [46:51] distinguishes "heads" from "tags".
+		//
+		// NOTE(review): for the short HEAD line ("<rev>\tHEAD"), [46:51]
+		// slices past len(pair). That avoids a panic only because pair is a
+		// subslice of out, whose capacity extends into the neighboring bytes
+		// of the buffer - fragile; confirm and consider bytes.HasPrefix on
+		// the ref name instead.
+		if string(pair[46:51]) == "heads" {
+			rev := Revision(pair[:40])
+
+			isdef := rev == headrev
+			n := string(pair[52:])
+			if isdef {
+				if onedef {
+					multidef = true
+				}
+				onedef = true
+				if n == "master" {
+					defmaster = true
+				}
+			}
+			v = branchVersion{
+				name:      n,
+				isDefault: isdef,
+			}.Is(rev).(PairedVersion)
+
+			vlist[uniq] = v
+			uniq++
+		} else if string(pair[46:50]) == "tags" {
+			vstr := string(pair[51:])
+			if strings.HasSuffix(vstr, "^{}") {
+				// If the suffix is there, then we *know* this is the rev of
+				// the underlying commit object that we actually want
+				vstr = strings.TrimSuffix(vstr, "^{}")
+			} else if smap[vstr] {
+				// Already saw the deref'd version of this tag, if one
+				// exists, so skip this.
+				continue
+				// Can only hit this branch if we somehow got the deref'd
+				// version first. Which should be impossible, but this
+				// covers us in case of weirdness, anyway.
+			}
+			v = NewVersion(vstr).Is(Revision(pair[:40])).(PairedVersion)
+			smap[vstr] = true
+			vlist[uniq] = v
+			uniq++
+		}
+	}
+
+	// Trim off excess from the slice
+	vlist = vlist[:uniq]
+
+	// There were multiple default branches, but one was master. So, go through
+	// and strip the default flag from all the non-master branches.
+	if multidef && defmaster {
+		for k, v := range vlist {
+			pv := v.(PairedVersion)
+			if bv, ok := pv.Unpair().(branchVersion); ok {
+				if bv.name != "master" && bv.isDefault == true {
+					bv.isDefault = false
+					vlist[k] = bv.Is(pv.Underlying())
+				}
+			}
+		}
+	}
+
+	// Process the version data into the cache
+	//
+	// reset the rmap and vmap, as they'll be fully repopulated by this
+	// TODO(sdboyer) detect out-of-sync pairings as we do this?
+	s.dc.vMap = make(map[UnpairedVersion]Revision)
+	s.dc.rMap = make(map[Revision][]UnpairedVersion)
+
+	for _, v := range vlist {
+		pv := v.(PairedVersion)
+		u, r := pv.Unpair(), pv.Underlying()
+		s.dc.vMap[u] = r
+		s.dc.rMap[r] = append(s.dc.rMap[r], u)
+	}
+	// Mark the cache as being in sync with upstream's version list
+	s.cvsync = true
+	return
+}
+
+// bzrSource is a generic bzr repository implementation that should work with
+// all standard bazaar remotes.
+type bzrSource struct {
+	baseVCSSource
+}
+
+// listVersions enumerates all tags (via `bzr tags`) plus a synthetic
+// "(default)" branch (resolved via `bzr version-info`), each paired with its
+// revision. Results are served from the in-memory cache when cvsync is set;
+// otherwise the cache repo is updated if stale, the lists rebuilt, and the
+// vMap/rMap caches repopulated.
+func (s *bzrSource) listVersions() (vlist []Version, err error) {
+	if s.cvsync {
+		// Cache hit: re-pair every cached unpaired version with its revision.
+		vlist = make([]Version, len(s.dc.vMap))
+		k := 0
+		for v, r := range s.dc.vMap {
+			vlist[k] = v.Is(r)
+			k++
+		}
+
+		return
+	}
+
+	// Must first ensure cache checkout's existence
+	err = s.ensureCacheExistence()
+	if err != nil {
+		return
+	}
+	r := s.crepo.r
+
+	// Local repo won't have all the latest refs if ensureCacheExistence()
+	// didn't create it
+	if !s.crepo.synced {
+		s.crepo.mut.Lock()
+		err = r.Update()
+		s.crepo.mut.Unlock()
+		if err != nil {
+			return
+		}
+
+		s.crepo.synced = true
+	}
+
+	var out []byte
+	// Now, list all the tags
+	out, err = r.RunFromDir("bzr", "tags", "--show-ids", "-v")
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, string(out))
+	}
+
+	// NOTE(review): if the repo has no tags at all, the trimmed output is
+	// empty and all becomes [""], making IndexByte return -1 and line[:idx]
+	// panic below. Confirm whether a tagless repo can reach this path.
+	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+
+	var branchrev []byte
+	branchrev, err = r.RunFromDir("bzr", "version-info", "--custom", "--template={revision_id}", "--revision=branch:.")
+	br := string(branchrev)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, br)
+	}
+
+	// Both commands completed successfully, so there's no further possibility
+	// of errors. That means it's now safe to reset the rmap and vmap, as
+	// they're about to be fully repopulated.
+	s.dc.vMap = make(map[UnpairedVersion]Revision)
+	s.dc.rMap = make(map[Revision][]UnpairedVersion)
+	// +1 reserves the final slot for the default branch appended below.
+	vlist = make([]Version, len(all)+1)
+
+	// Now, all the tags.
+	for k, line := range all {
+		// Each line is "<tag> <revid>"; split on the first space.
+		idx := bytes.IndexByte(line, 32) // space
+		v := NewVersion(string(line[:idx]))
+		r := Revision(bytes.TrimSpace(line[idx:]))
+
+		s.dc.vMap[v] = r
+		s.dc.rMap[r] = append(s.dc.rMap[r], v)
+		vlist[k] = v.Is(r)
+	}
+
+	// Last, add the default branch, hardcoding the visual representation of it
+	// that bzr uses when operating in the workflow mode we're using.
+	v := newDefaultBranch("(default)")
+	rev := Revision(string(branchrev))
+	s.dc.vMap[v] = rev
+	s.dc.rMap[rev] = append(s.dc.rMap[rev], v)
+	vlist[len(vlist)-1] = v.Is(rev)
+
+	// Cache is now in sync with upstream's version list
+	s.cvsync = true
+	return
+}
+
+// hgSource is a generic hg repository implementation that should work with
+// all standard mercurial servers.
+type hgSource struct {
+	baseVCSSource
+}
+
+// listVersions enumerates tags, bookmarks, and branches (via `hg tags`,
+// `hg bookmarks`, `hg branches`, all with --debug so full revs are printed),
+// each paired with its revision. The magic "@" bookmark, if present, becomes
+// the default branch; otherwise hg's "default" branch is marked default.
+// Results come from the in-memory cache when cvsync is set; otherwise the
+// cache repo is updated if stale and the vMap/rMap caches repopulated.
+func (s *hgSource) listVersions() (vlist []Version, err error) {
+	if s.cvsync {
+		// Cache hit: re-pair every cached unpaired version with its revision.
+		vlist = make([]Version, len(s.dc.vMap))
+		k := 0
+		for v, r := range s.dc.vMap {
+			vlist[k] = v.Is(r)
+			k++
+		}
+
+		return
+	}
+
+	// Must first ensure cache checkout's existence
+	err = s.ensureCacheExistence()
+	if err != nil {
+		return
+	}
+	r := s.crepo.r
+
+	// Local repo won't have all the latest refs if ensureCacheExistence()
+	// didn't create it
+	if !s.crepo.synced {
+		s.crepo.mut.Lock()
+		err = r.Update()
+		s.crepo.mut.Unlock()
+		if err != nil {
+			return
+		}
+
+		s.crepo.synced = true
+	}
+
+	var out []byte
+
+	// Now, list all the tags
+	out, err = r.RunFromDir("hg", "tags", "--debug", "--verbose")
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, string(out))
+	}
+
+	all := bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+	lbyt := []byte("local")
+	nulrev := []byte("0000000000000000000000000000000000000000")
+	for _, line := range all {
+		if bytes.Equal(lbyt, line[len(line)-len(lbyt):]) {
+			// Skip local tags
+			continue
+		}
+
+		// tip is magic, don't include it
+		if bytes.HasPrefix(line, []byte("tip")) {
+			continue
+		}
+
+		// Split on colon; this gets us the rev and the tag plus local revno
+		pair := bytes.Split(line, []byte(":"))
+		if bytes.Equal(nulrev, pair[1]) {
+			// null rev indicates this tag is marked for deletion
+			continue
+		}
+
+		// pair[0] is "<tag> <local revno>"; cut at the first space.
+		idx := bytes.IndexByte(pair[0], 32) // space
+		v := NewVersion(string(pair[0][:idx])).Is(Revision(pair[1])).(PairedVersion)
+		vlist = append(vlist, v)
+	}
+
+	// bookmarks next, because the presence of the magic @ bookmark has to
+	// determine how we handle the branches
+	var magicAt bool
+	out, err = r.RunFromDir("hg", "bookmarks", "--debug")
+	if err != nil {
+		// better nothing than partial and misleading
+		return nil, fmt.Errorf("%s: %s", err, string(out))
+	}
+
+	out = bytes.TrimSpace(out)
+	if !bytes.Equal(out, []byte("no bookmarks set")) {
+		all = bytes.Split(out, []byte("\n"))
+		for _, line := range all {
+			// Trim leading spaces, and * marker if present
+			line = bytes.TrimLeft(line, " *")
+			pair := bytes.Split(line, []byte(":"))
+			// if this doesn't split exactly once, we have something weird
+			if len(pair) != 2 {
+				continue
+			}
+
+			// Split on colon; this gets us the rev and the branch plus local revno
+			idx := bytes.IndexByte(pair[0], 32) // space
+			// if it's the magic @ marker, make that the default branch
+			str := string(pair[0][:idx])
+			var v Version
+			if str == "@" {
+				magicAt = true
+				v = newDefaultBranch(str).Is(Revision(pair[1])).(PairedVersion)
+			} else {
+				v = NewBranch(str).Is(Revision(pair[1])).(PairedVersion)
+			}
+			vlist = append(vlist, v)
+		}
+	}
+
+	out, err = r.RunFromDir("hg", "branches", "-c", "--debug")
+	if err != nil {
+		// better nothing than partial and misleading
+		return nil, fmt.Errorf("%s: %s", err, string(out))
+	}
+
+	all = bytes.Split(bytes.TrimSpace(out), []byte("\n"))
+	for _, line := range all {
+		// Trim inactive and closed suffixes, if present; we represent these
+		// anyway
+		line = bytes.TrimSuffix(line, []byte(" (inactive)"))
+		line = bytes.TrimSuffix(line, []byte(" (closed)"))
+
+		// Split on colon; this gets us the rev and the branch plus local revno
+		pair := bytes.Split(line, []byte(":"))
+		idx := bytes.IndexByte(pair[0], 32) // space
+		str := string(pair[0][:idx])
+		// if there was no magic @ bookmark, and this is mercurial's magic
+		// "default" branch, then mark it as default branch
+		var v Version
+		if !magicAt && str == "default" {
+			v = newDefaultBranch(str).Is(Revision(pair[1])).(PairedVersion)
+		} else {
+			v = NewBranch(str).Is(Revision(pair[1])).(PairedVersion)
+		}
+		vlist = append(vlist, v)
+	}
+
+	// reset the rmap and vmap, as they'll be fully repopulated by this
+	s.dc.vMap = make(map[UnpairedVersion]Revision)
+	s.dc.rMap = make(map[Revision][]UnpairedVersion)
+
+	for _, v := range vlist {
+		pv := v.(PairedVersion)
+		u, r := pv.Unpair(), pv.Underlying()
+		s.dc.vMap[u] = r
+		s.dc.rMap[r] = append(s.dc.rMap[r], u)
+	}
+
+	// Cache is now in sync with upstream's version list
+	s.cvsync = true
+	return
+}
+
+type repo struct {
+	// Path to the root of the default working copy (NOT the repo itself)
+	rpath string
+
+	// Mutex controlling general access to the repo
+	mut sync.RWMutex
+
+	// Object for direct repo interaction
+	r vcs.Repo
+
+	// Whether or not the cache repo is in sync (think dvcs) with upstream
+	synced bool
+}
+
+// exportVersionTo checks the working copy out at version v and copies its
+// tree to the directory at to, skipping vendor dirs and VCS metadata dirs.
+// This is the slow generic path; git has its own faster implementation.
+func (r *repo) exportVersionTo(v Version, to string) error {
+	r.mut.Lock()
+	defer r.mut.Unlock()
+
+	// TODO(sdboyer) sloppy - this update may not be necessary
+	if !r.synced {
+		err := r.r.Update()
+		if err != nil {
+			return fmt.Errorf("err on attempting to update repo: %s", err.Error())
+		}
+	}
+
+	// NOTE(review): UpdateVersion's error is discarded; if the checkout
+	// fails, the copy below silently exports whatever tree was already
+	// checked out. Consider propagating this error.
+	r.r.UpdateVersion(v.String())
+
+	// TODO(sdboyer) This is a dumb, slow approach, but we're punting on making
+	// these fast for now because git is the OVERWHELMING case (it's handled in
+	// its own method)
+
+	cfg := &shutil.CopyTreeOptions{
+		Symlinks:     true,
+		CopyFunction: shutil.Copy,
+		// Ignore prunes vendor and non-git VCS metadata directories from the
+		// copy; plain files with those names are kept.
+		Ignore: func(src string, contents []os.FileInfo) (ignore []string) {
+			for _, fi := range contents {
+				if !fi.IsDir() {
+					continue
+				}
+				n := fi.Name()
+				switch n {
+				case "vendor", ".bzr", ".svn", ".hg":
+					ignore = append(ignore, n)
+				}
+			}
+
+			return
+		},
+	}
+
+	return shutil.CopyTree(r.rpath, to, cfg)
+}
+
+// This func copied from Masterminds/vcs so we can exec our own commands
+//
+// mergeEnvLists merges the in list into out: an entry in out with the same
+// variable name is overwritten in place, and any in entry not already present
+// is appended. The merged list is returned.
+func mergeEnvLists(in, out []string) []string {
+NextVar:
+	for _, inkv := range in {
+		// SplitAfterN keeps the separator, so k is "NAME=" - a prefix that
+		// uniquely identifies the variable within an env list.
+		k := strings.SplitAfterN(inkv, "=", 2)[0]
+		for i, outkv := range out {
+			if strings.HasPrefix(outkv, k) {
+				out[i] = inkv
+				continue NextVar
+			}
+		}
+		out = append(out, inkv)
+	}
+	return out
+}
+
+// stripVendor is a filepath.WalkFunc-shaped callback that removes any
+// directory named "vendor" (via the package's removeAll helper). Files named
+// "vendor" and walk errors are deliberately ignored.
+func stripVendor(path string, info os.FileInfo, err error) error {
+	if info.Name() == "vendor" {
+		// Lstat re-checks existence without following symlinks before
+		// deleting; only true directories are removed.
+		if _, err := os.Lstat(path); err == nil {
+			if info.IsDir() {
+				return removeAll(path)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/sdboyer/gps/version.go b/vendor/github.com/sdboyer/gps/version.go
new file mode 100644
index 0000000..7912d1e
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/version.go
@@ -0,0 +1,708 @@
+package gps
+
+import (
+	"sort"
+
+	"github.com/Masterminds/semver"
+)
+
+// Version represents one of the different types of versions used by gps.
+//
+// Version composes Constraint, because all versions can be used as a constraint
+// (where they allow one, and only one, version - themselves), but constraints
+// are not necessarily discrete versions.
+//
+// Version is an interface, but it contains private methods, which restricts it
+// to gps's own internal implementations. We do this for the confluence of
+// two reasons: the implementation of Versions is complete (there is no case in
+// which we'd need other types), and the implementation relies on type magic
+// under the hood, which would be unsafe to do if other dynamic types could be
+// hiding behind the interface.
+type Version interface {
+	Constraint
+
+	// Indicates the type of version: "rev" (Revision), "branch", "version"
+	// (plain/non-semver), or "semver".
+	Type() string
+}
+
+// PairedVersion represents a normal Version, but paired with its corresponding,
+// underlying Revision.
+type PairedVersion interface {
+	Version
+
+	// Underlying returns the immutable Revision that identifies this Version.
+	Underlying() Revision
+
+	// Unpair returns the surface-level UnpairedVersion half of the pair.
+	//
+	// It does NOT modify the original PairedVersion
+	Unpair() UnpairedVersion
+
+	// Ensures it is impossible to be both a PairedVersion and an
+	// UnpairedVersion
+	_pair(int)
+}
+
+// UnpairedVersion represents a normal Version, with a method for creating a
+// VersionPair by indicating the version's corresponding, underlying Revision.
+type UnpairedVersion interface {
+	Version
+	// Is takes the underlying Revision that this UnpairedVersion corresponds
+	// to and unites them into a PairedVersion.
+	Is(Revision) PairedVersion
+	// Ensures it is impossible to be both a PairedVersion and an
+	// UnpairedVersion
+	_pair(bool)
+}
+
+// types are weird
+//
+// These no-op methods seal the Version interfaces: _private restricts
+// implementations to this package, and the differing _pair signatures
+// (bool vs. int) make it impossible for one type to satisfy both
+// PairedVersion and UnpairedVersion.
+func (branchVersion) _private()  {}
+func (branchVersion) _pair(bool) {}
+func (plainVersion) _private()   {}
+func (plainVersion) _pair(bool)  {}
+func (semVersion) _private()     {}
+func (semVersion) _pair(bool)    {}
+func (versionPair) _private()    {}
+func (versionPair) _pair(int)    {}
+func (Revision) _private()       {}
+
+// NewBranch creates a new Version to represent a floating version (in
+// general, a branch).
+func NewBranch(body string) UnpairedVersion {
+	return branchVersion{
+		name: body,
+		// We always set isDefault to false here, because the property is
+		// specifically designed to be internal-only: only the SourceManager
+		// gets to mark it. This is OK because nothing that client code is
+		// responsible for has to touch it.
+		//
+		// TODO(sdboyer) ...maybe. this just ugly.
+		isDefault: false,
+	}
+}
+
+// newDefaultBranch creates a branch version flagged as the repository's
+// default branch; internal-only (see the note in NewBranch).
+func newDefaultBranch(body string) UnpairedVersion {
+	return branchVersion{
+		name:      body,
+		isDefault: true,
+	}
+}
+
+// NewVersion creates a Semver-typed Version if the provided version string is
+// valid semver, and a plain/non-semver version if not.
+func NewVersion(body string) UnpairedVersion {
+	sv, err := semver.NewVersion(body)
+
+	if err != nil {
+		return plainVersion(body)
+	}
+	return semVersion{sv: sv}
+}
+
+// A Revision represents an immutable versioning identifier.
+type Revision string
+
+// String converts the Revision back into a string.
+func (r Revision) String() string {
+	return string(r)
+}
+
+// Type reports the version type for a Revision: "rev".
+func (r Revision) Type() string {
+	return "rev"
+}
+
+// Matches is the Revision acting as a constraint; it checks to see if the provided
+// version is the same Revision as itself. A versionPair matches on its
+// underlying revision; a versionTypeUnion delegates back to its own logic.
+func (r Revision) Matches(v Version) bool {
+	switch tv := v.(type) {
+	case versionTypeUnion:
+		return tv.Matches(r)
+	case Revision:
+		return r == tv
+	case versionPair:
+		return r == tv.r
+	}
+
+	return false
+}
+
+// MatchesAny is the Revision acting as a constraint; it checks to see if the provided
+// version is the same Revision as itself. any always matches; none never does.
+func (r Revision) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(r)
+	case Revision:
+		return r == tc
+	case versionPair:
+		return r == tc.r
+	}
+
+	return false
+}
+
+// Intersect computes the intersection of the Revision-as-constraint with c:
+// the Revision itself when c admits it, and none otherwise.
+func (r Revision) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return r
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(r)
+	case Revision:
+		if r == tc {
+			return r
+		}
+	case versionPair:
+		if r == tc.r {
+			return r
+		}
+	}
+
+	return none
+}
+
+// branchVersion is a floating version: a named branch, optionally flagged
+// (internally) as the repository's default branch.
+type branchVersion struct {
+	name      string
+	isDefault bool
+}
+
+// String returns the branch name.
+func (v branchVersion) String() string {
+	return string(v.name)
+}
+
+// Type reports the version type: "branch".
+func (v branchVersion) Type() string {
+	return "branch"
+}
+
+// Matches checks whether v2 names the same branch, unwrapping versionPairs
+// and delegating versionTypeUnions. Note that isDefault plays no part in
+// matching - only the name is compared.
+func (v branchVersion) Matches(v2 Version) bool {
+	switch tv := v2.(type) {
+	case versionTypeUnion:
+		return tv.Matches(v)
+	case branchVersion:
+		return v.name == tv.name
+	case versionPair:
+		if tv2, ok := tv.v.(branchVersion); ok {
+			return tv2.name == v.name
+		}
+	}
+	return false
+}
+
+// MatchesAny reports whether the constraint c admits this branch; any always
+// matches and none never does.
+func (v branchVersion) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(v)
+	case branchVersion:
+		return v.name == tc.name
+	case versionPair:
+		if tc2, ok := tc.v.(branchVersion); ok {
+			return tc2.name == v.name
+		}
+	}
+
+	return false
+}
+
+// Intersect returns this branch when c admits it, and none otherwise.
+func (v branchVersion) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case branchVersion:
+		if v.name == tc.name {
+			return v
+		}
+	case versionPair:
+		if tc2, ok := tc.v.(branchVersion); ok {
+			if v.name == tc2.name {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+// Is pairs this branch with its underlying Revision.
+func (v branchVersion) Is(r Revision) PairedVersion {
+	return versionPair{
+		v: v,
+		r: r,
+	}
+}
+
+// plainVersion is a version string (typically a tag) that is not valid
+// semver; see NewVersion.
+type plainVersion string
+
+// String returns the version string.
+func (v plainVersion) String() string {
+	return string(v)
+}
+
+// Type reports the version type: "version".
+func (r plainVersion) Type() string {
+	return "version"
+}
+
+// Matches checks whether v2 carries the same plain version string,
+// unwrapping versionPairs and delegating versionTypeUnions.
+func (v plainVersion) Matches(v2 Version) bool {
+	switch tv := v2.(type) {
+	case versionTypeUnion:
+		return tv.Matches(v)
+	case plainVersion:
+		return v == tv
+	case versionPair:
+		if tv2, ok := tv.v.(plainVersion); ok {
+			return tv2 == v
+		}
+	}
+	return false
+}
+
+// MatchesAny reports whether the constraint c admits this version; any
+// always matches and none never does.
+func (v plainVersion) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(v)
+	case plainVersion:
+		return v == tc
+	case versionPair:
+		if tc2, ok := tc.v.(plainVersion); ok {
+			return tc2 == v
+		}
+	}
+
+	return false
+}
+
+// Intersect returns this version when c admits it, and none otherwise.
+func (v plainVersion) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case plainVersion:
+		if v == tc {
+			return v
+		}
+	case versionPair:
+		if tc2, ok := tc.v.(plainVersion); ok {
+			if v == tc2 {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+// Is pairs this version with its underlying Revision.
+func (v plainVersion) Is(r Revision) PairedVersion {
+	return versionPair{
+		v: v,
+		r: r,
+	}
+}
+
+// semVersion wraps a parsed semantic version; see NewVersion.
+type semVersion struct {
+	sv *semver.Version
+}
+
+// String prefers the original input form of the version, falling back to
+// the normalized semver rendering when no original was retained.
+func (v semVersion) String() string {
+	str := v.sv.Original()
+	if str == "" {
+		str = v.sv.String()
+	}
+	return str
+}
+
+// Type reports the version type: "semver".
+func (r semVersion) Type() string {
+	return "semver"
+}
+
+// Matches checks semver equality with v2, unwrapping versionPairs and
+// delegating versionTypeUnions.
+func (v semVersion) Matches(v2 Version) bool {
+	switch tv := v2.(type) {
+	case versionTypeUnion:
+		return tv.Matches(v)
+	case semVersion:
+		return v.sv.Equal(tv.sv)
+	case versionPair:
+		if tv2, ok := tv.v.(semVersion); ok {
+			return tv2.sv.Equal(v.sv)
+		}
+	}
+	return false
+}
+
+// MatchesAny reports whether c admits this version; unlike the other version
+// types, semVersions can also be admitted by a semver range constraint.
+func (v semVersion) MatchesAny(c Constraint) bool {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return true
+	case noneConstraint:
+		return false
+	case versionTypeUnion:
+		return tc.MatchesAny(v)
+	case semVersion:
+		return v.sv.Equal(tc.sv)
+	case semverConstraint:
+		// Delegate range membership to the constraint's own intersection.
+		return tc.Intersect(v) != none
+	case versionPair:
+		if tc2, ok := tc.v.(semVersion); ok {
+			return tc2.sv.Equal(v.sv)
+		}
+	}
+
+	return false
+}
+
+// Intersect returns this version when c admits it (including via a semver
+// range), and none otherwise.
+func (v semVersion) Intersect(c Constraint) Constraint {
+	switch tc := c.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case semVersion:
+		if v.sv.Equal(tc.sv) {
+			return v
+		}
+	case semverConstraint:
+		return tc.Intersect(v)
+	case versionPair:
+		if tc2, ok := tc.v.(semVersion); ok {
+			if v.sv.Equal(tc2.sv) {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+// Is pairs this version with its underlying Revision.
+func (v semVersion) Is(r Revision) PairedVersion {
+	return versionPair{
+		v: v,
+		r: r,
+	}
+}
+
+type versionPair struct {
+	v UnpairedVersion
+	r Revision
+}
+
+func (v versionPair) String() string {
+	return v.v.String()
+}
+
+func (v versionPair) Type() string {
+	return v.v.Type()
+}
+
+func (v versionPair) Underlying() Revision {
+	return v.r
+}
+
+func (v versionPair) Unpair() UnpairedVersion {
+	return v.v
+}
+
+func (v versionPair) Matches(v2 Version) bool {
+	switch tv2 := v2.(type) {
+	case versionTypeUnion:
+		return tv2.Matches(v)
+	case versionPair:
+		return v.r == tv2.r
+	case Revision:
+		return v.r == tv2
+	}
+
+	switch tv := v.v.(type) {
+	case plainVersion, branchVersion:
+		if tv.Matches(v2) {
+			return true
+		}
+	case semVersion:
+		if tv2, ok := v2.(semVersion); ok {
+			if tv.sv.Equal(tv2.sv) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+func (v versionPair) MatchesAny(c2 Constraint) bool {
+	return c2.Matches(v)
+}
+
+func (v versionPair) Intersect(c2 Constraint) Constraint {
+	switch tc := c2.(type) {
+	case anyConstraint:
+		return v
+	case noneConstraint:
+		return none
+	case versionTypeUnion:
+		return tc.Intersect(v)
+	case versionPair:
+		if v.r == tc.r {
+			return v.r
+		}
+	case Revision:
+		if v.r == tc {
+			return v.r
+		}
+	case semverConstraint:
+		if tv, ok := v.v.(semVersion); ok {
+			if tc.Intersect(tv) == v.v {
+				return v
+			}
+		}
+		// If the semver intersection failed, we know nothing could work
+		return none
+	}
+
+	switch tv := v.v.(type) {
+	case plainVersion, branchVersion:
+		if c2.Matches(v) {
+			return v
+		}
+	case semVersion:
+		if tv2, ok := c2.(semVersion); ok {
+			if tv.sv.Equal(tv2.sv) {
+				return v
+			}
+		}
+	}
+
+	return none
+}
+
+// compareVersionType is a sort func helper that makes a coarse-grained sorting
+// decision based on version type.
+//
+// Make sure that l and r have already been converted from versionPair (if
+// applicable).
+func compareVersionType(l, r Version) int {
+	// Big fugly double type switch. No reflect, because this can be smack in a hot loop
+	switch l.(type) {
+	case Revision:
+		switch r.(type) {
+		case Revision:
+			return 0
+		case branchVersion, plainVersion, semVersion:
+			return 1
+		}
+
+	case plainVersion:
+		switch r.(type) {
+		case Revision:
+			return -1
+		case plainVersion:
+			return 0
+		case branchVersion, semVersion:
+			return 1
+		}
+
+	case branchVersion:
+		switch r.(type) {
+		case Revision, plainVersion:
+			return -1
+		case branchVersion:
+			return 0
+		case semVersion:
+			return 1
+		}
+
+	case semVersion:
+		switch r.(type) {
+		case Revision, branchVersion, plainVersion:
+			return -1
+		case semVersion:
+			return 0
+		}
+	}
+	panic("unknown version type")
+}
+
+// SortForUpgrade sorts a slice of []Version in roughly descending order, so
+// that presumably newer versions are visited first. The rules are:
+//
+//  - All semver versions come first, and sort mostly according to the semver
+//  2.0 spec (as implemented by github.com/Masterminds/semver lib), with one
+//  exception:
+//  - Semver versions with a prerelease are after *all* non-prerelease semver.
+//  Against each other, they are sorted first by their numerical component, then
+//  lexicographically by their prerelease version.
+//  - All branches are next, and sort lexicographically against each other.
+//  - All non-semver versions (tags) are next, and sort lexicographically
+//  against each other.
+//  - Revisions are last, and sort lexicographically against each other.
+//
+// So, given a slice of the following versions:
+//
+//  - Branch: master devel
+//  - Semver tags: v1.0.0, v1.1.0, v1.1.0-alpha1
+//  - Non-semver tags: footag
+//  - Revision: f6e74e8d
+//
+// Sorting for upgrade will result in the following slice.
+//
+//  [v1.1.0 v1.0.0 v1.1.0-alpha1 footag devel master f6e74e8d]
+func SortForUpgrade(vl []Version) {
+	sort.Sort(upgradeVersionSorter(vl))
+}
+
+// SortForDowngrade sorts a slice of []Version in roughly ascending order, so
+// that presumably older versions are visited first.
+//
+// This is *not* the same as reversing SortForUpgrade (or you could simply
+// sort.Reverse()). The type precedence is the same, including the semver vs.
+// semver-with-prerelease relation. Lexicographic comparisons within non-semver
+// tags, branches, and revisions remains the same as well; because we treat
+// these domains as having no ordering relations (chronology), there can be no
+// real concept of "upgrade" vs "downgrade", so there is no reason to reverse
+// them.
+//
+// Thus, the only binary relation that is reversed for downgrade is within-type
+// comparisons for semver (with and without prerelease).
+//
+// So, given a slice of the following versions:
+//
+//  - Branch: master devel
+//  - Semver tags: v1.0.0, v1.1.0, v1.1.0-alpha1
+//  - Non-semver tags: footag
+//  - Revision: f6e74e8d
+//
+// Sorting for downgrade will result in the following slice.
+//
+//  [v1.0.0 v1.1.0 v1.1.0-alpha1 footag devel master f6e74e8d]
+func SortForDowngrade(vl []Version) {
+	sort.Sort(downgradeVersionSorter(vl))
+}
+
+type upgradeVersionSorter []Version
+type downgradeVersionSorter []Version
+
+func (vs upgradeVersionSorter) Len() int {
+	return len(vs)
+}
+
+func (vs upgradeVersionSorter) Swap(i, j int) {
+	vs[i], vs[j] = vs[j], vs[i]
+}
+
+func (vs downgradeVersionSorter) Len() int {
+	return len(vs)
+}
+
+func (vs downgradeVersionSorter) Swap(i, j int) {
+	vs[i], vs[j] = vs[j], vs[i]
+}
+
+func (vs upgradeVersionSorter) Less(i, j int) bool {
+	l, r := vs[i], vs[j]
+
+	if tl, ispair := l.(versionPair); ispair {
+		l = tl.v
+	}
+	if tr, ispair := r.(versionPair); ispair {
+		r = tr.v
+	}
+
+	switch compareVersionType(l, r) {
+	case -1:
+		return true
+	case 1:
+		return false
+	case 0:
+		break
+	default:
+		panic("unreachable")
+	}
+
+	switch tl := l.(type) {
+	case branchVersion:
+		tr := r.(branchVersion)
+		if tl.isDefault != tr.isDefault {
+			// If they're not both defaults, then return the left val: if left
+			// is the default, then it is "less" (true) b/c we want it earlier.
+			// Else the right is the default, and so the left should be later
+			// (false).
+			return tl.isDefault
+		}
+		return l.String() < r.String()
+	case Revision, plainVersion:
+		// All that we can do now is alpha sort
+		return l.String() < r.String()
+	}
+
+	// This ensures that pre-release versions are always sorted after ALL
+	// full-release versions
+	lsv, rsv := l.(semVersion).sv, r.(semVersion).sv
+	lpre, rpre := lsv.Prerelease() == "", rsv.Prerelease() == ""
+	if (lpre && !rpre) || (!lpre && rpre) {
+		return lpre
+	}
+	return lsv.GreaterThan(rsv)
+}
+
+func (vs downgradeVersionSorter) Less(i, j int) bool {
+	l, r := vs[i], vs[j]
+
+	if tl, ispair := l.(versionPair); ispair {
+		l = tl.v
+	}
+	if tr, ispair := r.(versionPair); ispair {
+		r = tr.v
+	}
+
+	switch compareVersionType(l, r) {
+	case -1:
+		return true
+	case 1:
+		return false
+	case 0:
+		break
+	default:
+		panic("unreachable")
+	}
+
+	switch tl := l.(type) {
+	case branchVersion:
+		tr := r.(branchVersion)
+		if tl.isDefault != tr.isDefault {
+			// If they're not both defaults, then return the left val: if left
+			// is the default, then it is "less" (true) b/c we want it earlier.
+			// Else the right is the default, and so the left should be later
+			// (false).
+			return tl.isDefault
+		}
+		return l.String() < r.String()
+	case Revision, plainVersion:
+		// All that we can do now is alpha sort
+		return l.String() < r.String()
+	}
+
+	// This ensures that pre-release versions are always sorted after ALL
+	// full-release versions
+	lsv, rsv := l.(semVersion).sv, r.(semVersion).sv
+	lpre, rpre := lsv.Prerelease() == "", rsv.Prerelease() == ""
+	if (lpre && !rpre) || (!lpre && rpre) {
+		return lpre
+	}
+	return lsv.LessThan(rsv)
+}
diff --git a/vendor/github.com/sdboyer/gps/version_queue.go b/vendor/github.com/sdboyer/gps/version_queue.go
new file mode 100644
index 0000000..dc5da98
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/version_queue.go
@@ -0,0 +1,154 @@
+package gps
+
+import (
+	"fmt"
+	"strings"
+)
+
+type failedVersion struct {
+	v Version
+	f error
+}
+
+type versionQueue struct {
+	id           ProjectIdentifier
+	pi           []Version
+	lockv, prefv Version
+	fails        []failedVersion
+	b            sourceBridge
+	failed       bool
+	allLoaded    bool
+	adverr       error
+}
+
+func newVersionQueue(id ProjectIdentifier, lockv, prefv Version, b sourceBridge) (*versionQueue, error) {
+	vq := &versionQueue{
+		id: id,
+		b:  b,
+	}
+
+	// Lock goes in first, if present
+	if lockv != nil {
+		vq.lockv = lockv
+		vq.pi = append(vq.pi, lockv)
+	}
+
+	// Preferred version next
+	if prefv != nil {
+		vq.prefv = prefv
+		vq.pi = append(vq.pi, prefv)
+	}
+
+	if len(vq.pi) == 0 {
+		var err error
+		vq.pi, err = vq.b.ListVersions(vq.id)
+		if err != nil {
+			// TODO(sdboyer) pushing this error this early entails that we
+			// unconditionally deep scan (e.g. vendor), as well as hitting the
+			// network.
+			return nil, err
+		}
+		vq.allLoaded = true
+	}
+
+	return vq, nil
+}
+
+func (vq *versionQueue) current() Version {
+	if len(vq.pi) > 0 {
+		return vq.pi[0]
+	}
+
+	return nil
+}
+
+// advance moves the versionQueue forward to the next available version,
+// recording the failure that eliminated the current version.
+func (vq *versionQueue) advance(fail error) error {
+	// Nothing in the queue means...nothing in the queue, nicely enough
+	if vq.adverr != nil || len(vq.pi) == 0 { // should be a redundant check, but just in case
+		return vq.adverr
+	}
+
+	// Record the fail reason and pop the queue
+	vq.fails = append(vq.fails, failedVersion{
+		v: vq.pi[0],
+		f: fail,
+	})
+	vq.pi = vq.pi[1:]
+
+	// *now*, if the queue is empty, ensure all versions have been loaded
+	if len(vq.pi) == 0 {
+		if vq.allLoaded {
+			// This branch gets hit when the queue is first fully exhausted,
+			// after a previous advance() already called ListVersions().
+			return nil
+		}
+		vq.allLoaded = true
+
+		var vltmp []Version
+		vltmp, vq.adverr = vq.b.ListVersions(vq.id)
+		if vq.adverr != nil {
+			return vq.adverr
+		}
+		// defensive copy - calling ListVersions here means slice contents may
+		// be modified when removing prefv/lockv.
+		vq.pi = make([]Version, len(vltmp))
+		copy(vq.pi, vltmp)
+
+		// search for and remove lockv and prefv, in a pointer GC-safe manner
+		//
+		// could use the version comparator for binary search here to avoid
+		// O(n) each time...if it matters
+		var delkeys []int
+		for k, pi := range vq.pi {
+			if pi == vq.lockv || pi == vq.prefv {
+				delkeys = append(delkeys, k)
+			}
+		}
+
+		for k, dk := range delkeys {
+			dk -= k
+			copy(vq.pi[dk:], vq.pi[dk+1:])
+			// write nil to final position for GC safety
+			vq.pi[len(vq.pi)-1] = nil
+			vq.pi = vq.pi[:len(vq.pi)-1]
+		}
+
+		if len(vq.pi) == 0 {
+			// If listing versions added nothing (new), then return now
+			return nil
+		}
+	}
+
+	// We're finally sure that there's something in the queue. Remove the
+	// failure marker, as the current version may have failed, but the next one
+	// hasn't yet
+	vq.failed = false
+
+	// If all have been loaded and the queue is empty, we're definitely out
+	// of things to try. Return empty, though, because vq semantics dictate
+	// that we don't explicitly indicate the end of the queue here.
+	return nil
+}
+
+// isExhausted indicates whether or not the queue has definitely been exhausted,
+// in which case it will return true.
+//
+// It may return false negatives - suggesting that there is more in the queue
+// when a subsequent call to current() will be empty. Plan accordingly.
+func (vq *versionQueue) isExhausted() bool {
+	if !vq.allLoaded {
+		return false
+	}
+	return len(vq.pi) == 0
+}
+
+func (vq *versionQueue) String() string {
+	var vs []string
+
+	for _, v := range vq.pi {
+		vs = append(vs, v.String())
+	}
+	return fmt.Sprintf("[%s]", strings.Join(vs, ", "))
+}
diff --git a/vendor/github.com/sdboyer/gps/version_queue_test.go b/vendor/github.com/sdboyer/gps/version_queue_test.go
new file mode 100644
index 0000000..2e6174d
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/version_queue_test.go
@@ -0,0 +1,249 @@
+package gps
+
+import (
+	"fmt"
+	"testing"
+)
+
+// just need a ListVersions method
+type fakeBridge struct {
+	*bridge
+	vl []Version
+}
+
+var fakevl = []Version{
+	NewVersion("v2.0.0").Is("200rev"),
+	NewVersion("v1.1.1").Is("111rev"),
+	NewVersion("v1.1.0").Is("110rev"),
+	NewVersion("v1.0.0").Is("100rev"),
+	NewBranch("master").Is("masterrev"),
+}
+
+func init() {
+	SortForUpgrade(fakevl)
+}
+
+func (fb *fakeBridge) ListVersions(id ProjectIdentifier) ([]Version, error) {
+	// it's a fixture, we only ever do the one, regardless of id
+	return fb.vl, nil
+}
+
+type fakeFailBridge struct {
+	*bridge
+}
+
+var vqerr = fmt.Errorf("vqerr")
+
+func (fb *fakeFailBridge) ListVersions(id ProjectIdentifier) ([]Version, error) {
+	return nil, vqerr
+}
+
+func TestVersionQueueSetup(t *testing.T) {
+	id := ProjectIdentifier{ProjectRoot: ProjectRoot("foo")}.normalize()
+
+	// shouldn't even need to embed a real bridge
+	fb := &fakeBridge{vl: fakevl}
+	ffb := &fakeFailBridge{}
+
+	_, err := newVersionQueue(id, nil, nil, ffb)
+	if err == nil {
+		t.Error("Expected err when providing no prefv or lockv, and injected bridge returns err from ListVersions()")
+	}
+
+	vq, err := newVersionQueue(id, nil, nil, fb)
+	if err != nil {
+		t.Errorf("Unexpected err on vq create: %s", err)
+	} else {
+		if len(vq.pi) != 5 {
+			t.Errorf("Should have five versions from ListVersions() when providing no prefv or lockv; got %v:\n\t%s", len(vq.pi), vq.String())
+		}
+		if !vq.allLoaded {
+			t.Errorf("allLoaded flag should be set, but wasn't")
+		}
+
+		if vq.prefv != nil || vq.lockv != nil {
+			t.Error("lockv and prefv should be nil")
+		}
+		if vq.current() != fakevl[0] {
+			t.Errorf("current should be head of fakevl (%s), got %s", fakevl[0], vq.current())
+		}
+	}
+
+	lockv := fakevl[0]
+	prefv := fakevl[1]
+	vq, err = newVersionQueue(id, lockv, nil, fb)
+	if err != nil {
+		t.Errorf("Unexpected err on vq create: %s", err)
+	} else {
+		if len(vq.pi) != 1 {
+			t.Errorf("Should have one version when providing only a lockv; got %v:\n\t%s", len(vq.pi), vq.String())
+		}
+		if vq.allLoaded {
+			t.Errorf("allLoaded flag should not be set")
+		}
+		if vq.lockv != lockv {
+			t.Errorf("lockv should be %s, was %s", lockv, vq.lockv)
+		}
+		if vq.current() != lockv {
+			t.Errorf("current should be lockv (%s), got %s", lockv, vq.current())
+		}
+	}
+
+	vq, err = newVersionQueue(id, nil, prefv, fb)
+	if err != nil {
+		t.Errorf("Unexpected err on vq create: %s", err)
+	} else {
+		if len(vq.pi) != 1 {
+			t.Errorf("Should have one version when providing only a prefv; got %v:\n\t%s", len(vq.pi), vq.String())
+		}
+		if vq.allLoaded {
+			t.Errorf("allLoaded flag should not be set")
+		}
+		if vq.prefv != prefv {
+			t.Errorf("prefv should be %s, was %s", prefv, vq.prefv)
+		}
+		if vq.current() != prefv {
+			t.Errorf("current should be prefv (%s), got %s", prefv, vq.current())
+		}
+	}
+
+	vq, err = newVersionQueue(id, lockv, prefv, fb)
+	if err != nil {
+		t.Errorf("Unexpected err on vq create: %s", err)
+	} else {
+		if len(vq.pi) != 2 {
+			t.Errorf("Should have two versions when providing both a prefv and lockv; got %v:\n\t%s", len(vq.pi), vq.String())
+		}
+		if vq.allLoaded {
+			t.Errorf("allLoaded flag should not be set")
+		}
+		if vq.prefv != prefv {
+			t.Errorf("prefv should be %s, was %s", prefv, vq.prefv)
+		}
+		if vq.lockv != lockv {
+			t.Errorf("lockv should be %s, was %s", lockv, vq.lockv)
+		}
+		if vq.current() != lockv {
+			t.Errorf("current should be lockv (%s), got %s", lockv, vq.current())
+		}
+	}
+}
+
+func TestVersionQueueAdvance(t *testing.T) {
+	fb := &fakeBridge{vl: fakevl}
+	id := ProjectIdentifier{ProjectRoot: ProjectRoot("foo")}.normalize()
+
+	// First with no prefv or lockv
+	vq, err := newVersionQueue(id, nil, nil, fb)
+	if err != nil {
+		t.Errorf("Unexpected err on vq create: %s", err)
+		t.FailNow()
+	}
+
+	for k, v := range fakevl[1:] {
+		err = vq.advance(fmt.Errorf("advancment fail for %s", fakevl[k]))
+		if err != nil {
+			t.Errorf("error on advancing vq from %s to %s", fakevl[k], v)
+			break
+		}
+
+		if vq.current() != v {
+			t.Errorf("on advance() %v, current should be %s, got %s", k, v, vq.current())
+		}
+	}
+
+	if vq.isExhausted() {
+		t.Error("should not be exhausted until advancing 'past' the end")
+	}
+	if err = vq.advance(fmt.Errorf("final advance failure")); err != nil {
+		t.Errorf("should not error on advance, even past end, but got %s", err)
+	}
+
+	if !vq.isExhausted() {
+		t.Error("advanced past end, should now report exhaustion")
+	}
+	if vq.current() != nil {
+		t.Error("advanced past end, current should return nil")
+	}
+
+	// now, do one with both a prefv and lockv
+	lockv := fakevl[2]
+	prefv := fakevl[0]
+	vq, err = newVersionQueue(id, lockv, prefv, fb)
+	if vq.String() != "[v1.1.0, v2.0.0]" {
+		t.Error("stringifying vq did not have expected outcome, got", vq.String())
+	}
+	if vq.isExhausted() {
+		t.Error("can't be exhausted, we aren't even 'allLoaded' yet")
+	}
+
+	err = vq.advance(fmt.Errorf("dequeue lockv"))
+	if err != nil {
+		t.Error("unexpected error when advancing past lockv", err)
+	} else {
+		if vq.current() != prefv {
+			t.Errorf("current should be prefv (%s) after first advance, got %s", prefv, vq.current())
+		}
+		if len(vq.pi) != 1 {
+			t.Errorf("should have just prefv elem left in vq, but there are %v:\n\t%s", len(vq.pi), vq.String())
+		}
+	}
+
+	err = vq.advance(fmt.Errorf("dequeue prefv"))
+	if err != nil {
+		t.Error("unexpected error when advancing past prefv", err)
+	} else {
+		if !vq.allLoaded {
+			t.Error("allLoaded should now be true")
+		}
+		if len(vq.pi) != 3 {
+			t.Errorf("should have three remaining versions after removing prefv and lockv, but there are %v:\n\t%s", len(vq.pi), vq.String())
+		}
+		if vq.current() != fakevl[1] {
+			t.Errorf("current should be first elem of fakevl (%s) after advancing into all, got %s", fakevl[1], vq.current())
+		}
+	}
+
+	// make sure the queue ordering is still right even with a double-delete
+	vq.advance(nil)
+	if vq.current() != fakevl[3] {
+		t.Errorf("second elem after ListVersions() should be idx 3 of fakevl (%s), got %s", fakevl[3], vq.current())
+	}
+	vq.advance(nil)
+	if vq.current() != fakevl[4] {
+		t.Errorf("third elem after ListVersions() should be idx 4 of fakevl (%s), got %s", fakevl[4], vq.current())
+	}
+	vq.advance(nil)
+	if vq.current() != nil || !vq.isExhausted() {
+		t.Error("should be out of versions in the queue")
+	}
+
+	// Make sure we handle things correctly when ListVersions() adds nothing new
+	fb = &fakeBridge{vl: []Version{lockv, prefv}}
+	vq, err = newVersionQueue(id, lockv, prefv, fb)
+	vq.advance(nil)
+	vq.advance(nil)
+	if vq.current() != nil || !vq.isExhausted() {
+		t.Errorf("should have no versions left, as ListVersions() added nothing new, but still have %s", vq.String())
+	}
+	err = vq.advance(nil)
+	if err != nil {
+		t.Errorf("should be fine to advance on empty queue, per docs, but got err %s", err)
+	}
+
+	// Also handle it well when advancing calls ListVersions() and it gets an
+	// error
+	vq, err = newVersionQueue(id, lockv, nil, &fakeFailBridge{})
+	if err != nil {
+		t.Errorf("should not err on creation when preseeded with lockv, but got err %s", err)
+	}
+	err = vq.advance(nil)
+	if err == nil {
+		t.Error("advancing should trigger call to erroring bridge, but no err")
+	}
+	err = vq.advance(nil)
+	if err == nil {
+		t.Error("err should be stored for reuse on any subsequent calls")
+	}
+
+}
diff --git a/vendor/github.com/sdboyer/gps/version_test.go b/vendor/github.com/sdboyer/gps/version_test.go
new file mode 100644
index 0000000..d375e77
--- /dev/null
+++ b/vendor/github.com/sdboyer/gps/version_test.go
@@ -0,0 +1,100 @@
+package gps
+
+import "testing"
+
+func TestVersionSorts(t *testing.T) {
+	rev := Revision("flooboofoobooo")
+	v1 := NewBranch("master").Is(rev)
+	v2 := NewBranch("test").Is(rev)
+	v3 := NewVersion("1.0.0").Is(rev)
+	v4 := NewVersion("1.0.1")
+	v5 := NewVersion("v2.0.5")
+	v6 := NewVersion("2.0.5.2")
+	v7 := newDefaultBranch("unwrapped")
+	v8 := NewVersion("20.0.5.2")
+
+	start := []Version{
+		v1,
+		v2,
+		v3,
+		v4,
+		v5,
+		v6,
+		v7,
+		v8,
+		rev,
+	}
+
+	down := make([]Version, len(start))
+	copy(down, start)
+	up := make([]Version, len(start))
+	copy(up, start)
+
+	edown := []Version{
+		v3, v4, v5, // semvers
+		v7, v1, v2, // floating/branches
+		v6, v8, // plain versions
+		rev, // revs
+	}
+
+	eup := []Version{
+		v5, v4, v3, // semvers
+		v7, v1, v2, // floating/branches
+		v6, v8, // plain versions
+		rev, // revs
+	}
+
+	SortForUpgrade(up)
+	var wrong []int
+	for k, v := range up {
+		if eup[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on upgrade sort, but got %s", eup[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Upgrade sort positions with wrong versions: %v", wrong)
+	}
+
+	SortForDowngrade(down)
+	wrong = wrong[:0]
+	for k, v := range down {
+		if edown[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on downgrade sort, but got %s", edown[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Downgrade sort positions with wrong versions: %v", wrong)
+	}
+
+	// Now make sure we sort back the other way correctly...just because
+	SortForUpgrade(down)
+	wrong = wrong[:0]
+	for k, v := range down {
+		if eup[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on down-then-upgrade sort, but got %s", eup[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Down-then-upgrade sort positions with wrong versions: %v", wrong)
+	}
+
+	// Now make sure we sort back the other way correctly...just because
+	SortForDowngrade(up)
+	wrong = wrong[:0]
+	for k, v := range up {
+		if edown[k] != v {
+			wrong = append(wrong, k)
+			t.Errorf("Expected version %s in position %v on up-then-downgrade sort, but got %s", edown[k], k, v)
+		}
+	}
+	if len(wrong) > 0 {
+		// Just helps with readability a bit
+		t.Errorf("Up-then-downgrade sort positions with wrong versions: %v", wrong)
+	}
+}
diff --git a/vendor/github.com/termie/go-shutil/.gitignore b/vendor/github.com/termie/go-shutil/.gitignore
new file mode 100644
index 0000000..139b1ee
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/.gitignore
@@ -0,0 +1 @@
+test/testfile3
diff --git a/vendor/github.com/termie/go-shutil/LICENSE b/vendor/github.com/termie/go-shutil/LICENSE
new file mode 100644
index 0000000..3890b94
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/LICENSE
@@ -0,0 +1 @@
+I guess Python's? If that doesn't apply then MIT. Have fun.
diff --git a/vendor/github.com/termie/go-shutil/README.rst b/vendor/github.com/termie/go-shutil/README.rst
new file mode 100644
index 0000000..b63b016
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/README.rst
@@ -0,0 +1,24 @@
+=========================================
+High-level Filesystem Operations (for Go)
+=========================================
+
+
+A direct port of a few of the functions from Python's shutil package for
+high-level filesystem operations.
+
+This project pretty much only exists so that other people don't have to keep
+re-writing this code in their projects; at this time we have been unable to
+find any helpful packages for this in the stdlib or elsewhere.
+
+We don't expect it to be perfect, just better than whatever your first draft
+would have been. Patches welcome.
+
+See also: https://docs.python.org/3.5/library/shutil.html
+
+================
+Functions So Far
+================
+
+We support Copy, CopyFile, CopyMode, and CopyTree. CopyStat would be nice if
+anybody wants to write that. Also the other functions that might be useful in
+the python library :D
diff --git a/vendor/github.com/termie/go-shutil/shutil.go b/vendor/github.com/termie/go-shutil/shutil.go
new file mode 100644
index 0000000..09fcd38
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/shutil.go
@@ -0,0 +1,326 @@
+package shutil
+
+import (
+  "fmt"
+  "io"
+  "io/ioutil"
+  "os"
+  "path/filepath"
+)
+
+
+type SameFileError struct {
+  Src string
+  Dst string
+}
+
+func (e SameFileError) Error() string {
+  return fmt.Sprintf("%s and %s are the same file", e.Src, e.Dst)
+}
+
+type SpecialFileError struct {
+  File string
+  FileInfo os.FileInfo
+}
+
+func (e SpecialFileError) Error() string {
+  return fmt.Sprintf("`%s` is a named pipe", e.File)
+}
+
+type NotADirectoryError struct {
+  Src string
+}
+
+func (e NotADirectoryError) Error() string {
+  return fmt.Sprintf("`%s` is not a directory", e.Src)
+}
+
+
+type AlreadyExistsError struct {
+  Dst string
+}
+
+func (e AlreadyExistsError) Error() string {
+  return fmt.Sprintf("`%s` already exists", e.Dst)
+}
+
+
+func samefile(src string, dst string) bool {
+  srcInfo, _ := os.Stat(src)
+  dstInfo, _ := os.Stat(dst)
+  return os.SameFile(srcInfo, dstInfo)
+}
+
+func specialfile(fi os.FileInfo) bool {
+  return (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe
+}
+
+func stringInSlice(a string, list []string) bool {
+    for _, b := range list {
+        if b == a {
+            return true
+        }
+    }
+    return false
+}
+
+func IsSymlink(fi os.FileInfo) bool {
+  return (fi.Mode() & os.ModeSymlink) == os.ModeSymlink
+}
+
+
+// Copy data from src to dst
+//
+// If followSymlinks is not set and src is a symbolic link, a
+// new symlink will be created instead of copying the file it points
+// to.
+func CopyFile(src, dst string, followSymlinks bool) (error) {
+  if samefile(src, dst) {
+    return &SameFileError{src, dst}
+  }
+
+  // Make sure src exists and neither are special files
+  srcStat, err := os.Lstat(src)
+  if err != nil {
+    return err
+  }
+  if specialfile(srcStat) {
+    return &SpecialFileError{src, srcStat}
+  }
+
+  dstStat, err := os.Stat(dst)
+  if err != nil && !os.IsNotExist(err) {
+    return err
+  } else if err == nil {
+    if specialfile(dstStat) {
+      return &SpecialFileError{dst, dstStat}
+    }
+  }
+
+  // If we don't follow symlinks and it's a symlink, just link it and be done
+  if !followSymlinks && IsSymlink(srcStat) {
+    return os.Symlink(src, dst)
+  }
+
+  // If we are a symlink, follow it
+  if IsSymlink(srcStat) {
+    src, err = os.Readlink(src)
+    if err != nil {
+      return err
+    }
+    srcStat, err = os.Stat(src)
+    if err != nil {
+      return err
+    }
+  }
+
+  // Do the actual copy
+  fsrc, err := os.Open(src)
+  if err != nil {
+    return err
+  }
+  defer fsrc.Close()
+
+  fdst, err := os.Create(dst)
+  if err != nil {
+    return err
+  }
+  defer fdst.Close()
+
+  size, err := io.Copy(fdst, fsrc)
+  if err != nil {
+    return err
+  }
+
+  if size != srcStat.Size() {
+    return fmt.Errorf("%s: %d/%d copied", src, size, srcStat.Size())
+  }
+
+  return nil
+}
+
+
+// Copy mode bits from src to dst.
+//
+// If followSymlinks is false, symlinks aren't followed if and only
+// if both `src` and `dst` are symlinks. If `lchmod` isn't available
+// and both are symlinks this does nothing. (I don't think lchmod is
+// available in Go)
+func CopyMode(src, dst string, followSymlinks bool) error {
+  srcStat, err := os.Lstat(src)
+  if err != nil {
+    return err
+  }
+
+  dstStat, err := os.Lstat(dst)
+  if err != nil {
+    return err
+  }
+
+  // They are both symlinks and we can't change mode on symlinks.
+  if !followSymlinks && IsSymlink(srcStat) && IsSymlink(dstStat) {
+    return nil
+  }
+
+  // At least one is not a symlink, get the actual file stats
+  srcStat, _ = os.Stat(src)
+  err = os.Chmod(dst, srcStat.Mode())
+  return err
+}
+
+
+// Copy data and mode bits ("cp src dst"). Return the file's destination.
+//
+// The destination may be a directory.
+//
+// If followSymlinks is false, symlinks won't be followed. This
+// resembles GNU's "cp -P src dst".
+//
+// If source and destination are the same file, a SameFileError will be
+// raised.
+func Copy(src, dst string, followSymlinks bool) (string, error){
+  dstInfo, err := os.Stat(dst)
+
+  if err == nil && dstInfo.Mode().IsDir() {
+    dst = filepath.Join(dst, filepath.Base(src))
+  }
+
+  if err != nil && !os.IsNotExist(err) {
+    return dst, err
+  }
+
+  err = CopyFile(src, dst, followSymlinks)
+  if err != nil {
+    return dst, err
+  }
+
+  err = CopyMode(src, dst, followSymlinks)
+  if err != nil {
+    return dst, err
+  }
+
+  return dst, nil
+}
+
+type CopyTreeOptions struct {
+  Symlinks bool
+  IgnoreDanglingSymlinks bool
+  CopyFunction func (string, string, bool) (string, error)
+  Ignore func (string, []os.FileInfo) []string
+}
+
+// Recursively copy a directory tree.
+//
+// The destination directory must not already exist.
+//
+// If the optional Symlinks flag is true, symbolic links in the
+// source tree result in symbolic links in the destination tree; if
+// it is false, the contents of the files pointed to by symbolic
+// links are copied. If the file pointed by the symlink doesn't
+// exist, an error will be returned.
+//
+// You can set the optional IgnoreDanglingSymlinks flag to true if you
+// want to silence this error. Notice that this has no effect on
+// platforms that don't support os.Symlink.
+//
+// The optional ignore argument is a callable. If given, it
+// is called with the `src` parameter, which is the directory
+// being visited by CopyTree(), and `names` which is the list of
+// `src` contents, as returned by ioutil.ReadDir():
+//
+//   callable(src, entries) -> ignoredNames
+//
+// Since CopyTree() is called recursively, the callable will be
+// called once for each directory that is copied. It returns a
+// list of names relative to the `src` directory that should
+// not be copied.
+//
+// The optional copyFunction argument is a callable that will be used
+// to copy each file. It will be called with the source path and the
+// destination path as arguments. By default, Copy() is used, but any
+// function that supports the same signature (like Copy2() when it
+// exists) can be used.
+func CopyTree(src, dst string, options *CopyTreeOptions) error {
+  if options == nil {
+    options = &CopyTreeOptions{Symlinks:false,
+                               Ignore:nil,
+                               CopyFunction:Copy,
+                               IgnoreDanglingSymlinks:false}
+  }
+
+
+  srcFileInfo, err := os.Stat(src)
+  if err != nil {
+    return err
+  }
+
+  if !srcFileInfo.IsDir() {
+    return &NotADirectoryError{src}
+  }
+
+  _, err = os.Open(dst)
+  if !os.IsNotExist(err) {
+    return &AlreadyExistsError{dst}
+  }
+
+  entries, err := ioutil.ReadDir(src)
+  if err != nil {
+    return err
+  }
+
+  err = os.MkdirAll(dst, srcFileInfo.Mode())
+  if err != nil {
+    return err
+  }
+
+  ignoredNames := []string{}
+  if options.Ignore != nil {
+    ignoredNames = options.Ignore(src, entries)
+  }
+
+  for _, entry := range entries {
+    if stringInSlice(entry.Name(), ignoredNames) {
+      continue
+    }
+    srcPath := filepath.Join(src, entry.Name())
+    dstPath := filepath.Join(dst, entry.Name())
+
+    entryFileInfo, err := os.Lstat(srcPath)
+    if err != nil {
+      return err
+    }
+
+    // Deal with symlinks
+    if IsSymlink(entryFileInfo) {
+      linkTo, err := os.Readlink(srcPath)
+      if err != nil {
+        return err
+      }
+      if options.Symlinks {
+        os.Symlink(linkTo, dstPath)
+        //CopyStat(srcPath, dstPath, false)
+      } else {
+        // ignore dangling symlink if flag is on
+        _, err = os.Stat(linkTo)
+        if os.IsNotExist(err) && options.IgnoreDanglingSymlinks {
+          continue
+        }
+        _, err = options.CopyFunction(srcPath, dstPath, false)
+        if err != nil {
+          return err
+        }
+      }
+    } else if entryFileInfo.IsDir() {
+      err = CopyTree(srcPath, dstPath, options)
+      if err != nil {
+        return err
+      }
+    } else {
+      _, err = options.CopyFunction(srcPath, dstPath, false)
+      if err != nil {
+        return err
+      }
+    }
+  }
+  return nil
+}
diff --git a/vendor/github.com/termie/go-shutil/shutil_test.go b/vendor/github.com/termie/go-shutil/shutil_test.go
new file mode 100644
index 0000000..f6ec261
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/shutil_test.go
@@ -0,0 +1,156 @@
+package shutil
+
+import (
+  "bytes"
+  "io/ioutil"
+  "os"
+  "testing"
+)
+
+
+// filesMatch reports whether the files at src and dst have identical
+// contents, returning an error if either file cannot be read.
+func filesMatch(src, dst string) (bool, error) {
+  srcContents, err := ioutil.ReadFile(src)
+  if err != nil {
+    return false, err
+  }
+
+  dstContents, err := ioutil.ReadFile(dst)
+  if err != nil {
+    return false, err
+  }
+
+  // bytes.Equal states the intent directly; bytes.Compare(...) != 0
+  // computes a full ordering only to discard it (staticcheck S1004).
+  return bytes.Equal(srcContents, dstContents), nil
+}
+
+
+// TestSameFileError verifies that copying a file onto itself is
+// rejected with a *SameFileError.
+func TestSameFileError(t *testing.T) {
+  _, err := Copy("test/testfile", "test/testfile", false)
+  if _, ok := err.(*SameFileError); !ok {
+    t.Error(err)
+  }
+}
+
+
+// TestCopyFile checks that CopyFile copies contents to a fresh
+// destination, then overwrites that destination on a second copy.
+func TestCopyFile(t *testing.T) {
+  // clear out existing files if they exist
+  os.Remove("test/testfile3")
+
+  err := CopyFile("test/testfile", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match, err := filesMatch("test/testfile", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+  if !match {
+    // A bare t.Fail() reports nothing; say what actually differed.
+    t.Error("testfile3 does not match copied source testfile")
+    return
+  }
+
+  // And again without clearing the files
+  err = CopyFile("test/testfile2", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match2, err := filesMatch("test/testfile2", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  if !match2 {
+    t.Error("testfile3 does not match overwriting source testfile2")
+    return
+  }
+}
+
+
+// TestCopy checks that Copy copies contents to a fresh destination,
+// then overwrites that destination on a second copy.
+func TestCopy(t *testing.T) {
+  // clear out existing files if they exist
+  os.Remove("test/testfile3")
+
+  _, err := Copy("test/testfile", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match, err := filesMatch("test/testfile", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+  if !match {
+    // A bare t.Fail() reports nothing; say what actually differed.
+    t.Error("testfile3 does not match copied source testfile")
+    return
+  }
+
+  // And again without clearing the files
+  _, err = Copy("test/testfile2", "test/testfile3", false)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match2, err := filesMatch("test/testfile2", "test/testfile3")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  if !match2 {
+    t.Error("testfile3 does not match overwriting source testfile2")
+    return
+  }
+}
+
+
+// TestCopyTree checks that CopyTree recursively copies a directory
+// and all of its entries.
+func TestCopyTree(t *testing.T) {
+  // clear out existing files if they exist
+  os.RemoveAll("test/testdir3")
+
+  err := CopyTree("test/testdir", "test/testdir3", nil)
+  if err != nil {
+    t.Error(err)
+    return
+  }
+
+  match, err := filesMatch("test/testdir/file1", "test/testdir3/file1")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+  if !match {
+    t.Error("testdir3/file1 does not match source file1")
+    return
+  }
+
+  // Also verify the second entry was copied, replacing the stale
+  // commented-out leftover from TestCopy that referenced testfile
+  // paths unrelated to tree copying.
+  match2, err := filesMatch("test/testdir/file2", "test/testdir3/file2")
+  if err != nil {
+    t.Error(err)
+    return
+  }
+  if !match2 {
+    t.Error("testdir3/file2 does not match source file2")
+    return
+  }
+}
+
diff --git a/vendor/github.com/termie/go-shutil/test/testdir/file1 b/vendor/github.com/termie/go-shutil/test/testdir/file1
new file mode 100644
index 0000000..e212970
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testdir/file1
@@ -0,0 +1 @@
+file1
diff --git a/vendor/github.com/termie/go-shutil/test/testdir/file2 b/vendor/github.com/termie/go-shutil/test/testdir/file2
new file mode 100644
index 0000000..6c493ff
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testdir/file2
@@ -0,0 +1 @@
+file2
diff --git a/vendor/github.com/termie/go-shutil/test/testfile b/vendor/github.com/termie/go-shutil/test/testfile
new file mode 100644
index 0000000..2691857
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testfile
@@ -0,0 +1 @@
+testfile
diff --git a/vendor/github.com/termie/go-shutil/test/testfile2 b/vendor/github.com/termie/go-shutil/test/testfile2
new file mode 100644
index 0000000..7d57647
--- /dev/null
+++ b/vendor/github.com/termie/go-shutil/test/testfile2
@@ -0,0 +1 @@
+testfile2
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
index 7b8bd86..1884de6 100644
--- a/vendor/gopkg.in/yaml.v2/README.md
+++ b/vendor/gopkg.in/yaml.v2/README.md
@@ -42,7 +42,7 @@
 License
 -------
 
-The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
 
 
 Example